author     Linus Torvalds <torvalds@linux-foundation.org>   2019-05-07 22:03:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-05-07 22:03:58 -0700
commit     80f232121b69cc69a31ccb2b38c1665d770b0710 (patch)
tree       106263eac4ff03b899df695e00dd11e593e74fe2 /drivers
parent     82efe439599439a5e1e225ce5740e6cfb777a7dd (diff)
parent     a9e41a529681b38087c91ebc0bb91e12f510ca2d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Support AES128-CCM ciphers in kTLS, from Vakul Garg.

  2) Add fib_sync_mem to control the amount of dirty memory we allow to
     queue up between synchronize RCU calls, from David Ahern.

  3) Make flow classifier more lockless, from Vlad Buslov.

  4) Add PHY downshift support to aquantia driver, from Heiner Kallweit.

  5) Add SKB cache for TCP rx and tx, from Eric Dumazet. This reduces
     contention on SLAB spinlocks in heavy RPC workloads.

  6) Partial GSO offload support in XFRM, from Boris Pismenny.

  7) Add fast link down support to ethtool, from Heiner Kallweit.

  8) Use siphash for IP ID generator, from Eric Dumazet.

  9) Pull nexthops even further out from ipv4/ipv6 routes and FIB
     entries, from David Ahern.

 10) Move skb->xmit_more into a per-cpu variable, from Florian Westphal.

 11) Improve eBPF verifier speed and increase maximum program size, from
     Alexei Starovoitov.

 12) Eliminate per-bucket spinlocks in rhashtable, and instead use bit
     spinlocks. From Neil Brown.

 13) Allow tunneling with GUE encap in ipvs, from Jacky Hu.

 14) Improve link partner cap detection in generic PHY code, from Heiner
     Kallweit.

 15) Add layer 2 encap support to bpf_skb_adjust_room(), from Alan
     Maguire.

 16) Remove SKB list implementation assumptions in SCTP, yours truly.

 17) Various cleanups, optimizations, and simplifications in r8169
     driver. From Heiner Kallweit.

 18) Add memory accounting on TX and RX path of SCTP, from Xin Long.

 19) Switch PHY drivers over to use dynamic feature detection, from
     Heiner Kallweit.

 20) Support flow steering without masking in dpaa2-eth, from Ioana
     Ciocoi.

 21) Implement ndo_get_devlink_port in netdevsim driver, from Jiri
     Pirko.

 22) Increase the strict parsing of current and future netlink
     attributes, also export such policies to userspace. From Johannes
     Berg.

 23) Allow DSA tag drivers to be modular, from Andrew Lunn.

 24) Remove legacy DSA probing support, also from Andrew Lunn.

 25) Allow ll_temac driver to be used on non-x86 platforms, from Esben
     Haabendal.

 26) Add a generic tracepoint for TX queue timeouts to ease debugging,
     from Cong Wang.

 27) More indirect call optimizations, from Paolo Abeni"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1763 commits)
  cxgb4: Fix error path in cxgb4_init_module
  net: phy: improve pause mode reporting in phy_print_status
  dt-bindings: net: Fix a typo in the phy-mode list for ethernet bindings
  net: macb: Change interrupt and napi enable order in open
  net: ll_temac: Improve error message on error IRQ
  net/sched: remove block pointer from common offload structure
  net: ethernet: support of_get_mac_address new ERR_PTR error
  net: usb: smsc: fix warning reported by kbuild test robot
  staging: octeon-ethernet: Fix of_get_mac_address ERR_PTR check
  net: dsa: support of_get_mac_address new ERR_PTR error
  net: dsa: sja1105: Fix status initialization in sja1105_get_ethtool_stats
  vrf: sit mtu should not be updated when vrf netdev is the link
  net: dsa: Fix error cleanup path in dsa_init_module
  l2tp: Fix possible NULL pointer dereference
  taprio: add null check on sched_nest to avoid potential null pointer dereference
  net: mvpp2: cls: fix less than zero check on a u32 variable
  net_sched: sch_fq: handle non connected flows
  net_sched: sch_fq: do not assume EDT packets are ordered
  net: hns3: use devm_kcalloc when allocating desc_cb
  net: hns3: some cleanup for struct hns3_enet_ring
  ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/block/drbd/drbd_nl.c8
-rw-r--r--drivers/block/drbd/drbd_nla.c3
-rw-r--r--drivers/block/nbd.c38
-rw-r--r--drivers/bluetooth/Kconfig15
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btbcm.c10
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c36
-rw-r--r--drivers/bluetooth/btmtksdio.c1101
-rw-r--r--drivers/bluetooth/btmtkuart.c2
-rw-r--r--drivers/bluetooth/btqca.c7
-rw-r--r--drivers/bluetooth/btqca.h13
-rw-r--r--drivers/bluetooth/btsdio.c15
-rw-r--r--drivers/bluetooth/hci_bcm.c20
-rw-r--r--drivers/bluetooth/hci_h5.c2
-rw-r--r--drivers/bluetooth/hci_qca.c91
-rw-r--r--drivers/infiniband/core/addr.c8
-rw-r--r--drivers/infiniband/core/iwpm_util.c8
-rw-r--r--drivers/infiniband/core/nldev.c45
-rw-r--r--drivers/infiniband/core/sa_query.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c8
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c3
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c24
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c6
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c6
-rw-r--r--drivers/isdn/hisax/config.c6
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c9
-rw-r--r--drivers/media/rc/bpf-lirc.c6
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/ipddp.c6
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/bonding/bond_netlink.c8
-rw-r--r--drivers/net/dsa/Kconfig4
-rw-r--r--drivers/net/dsa/Makefile1
-rw-r--r--drivers/net/dsa/b53/b53_common.c34
-rw-r--r--drivers/net/dsa/b53/b53_priv.h1
-rw-r--r--drivers/net/dsa/bcm_sf2.c9
-rw-r--r--drivers/net/dsa/lantiq_gswip.c812
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c1
-rw-r--r--drivers/net/dsa/mt7530.c20
-rw-r--r--drivers/net/dsa/mt7530.h1
-rw-r--r--drivers/net/dsa/mv88e6060.c217
-rw-r--r--drivers/net/dsa/mv88e6060.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c287
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h11
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.c158
-rw-r--r--drivers/net/dsa/mv88e6xxx/smi.h59
-rw-r--r--drivers/net/dsa/sja1105/Kconfig17
-rw-r--r--drivers/net/dsa/sja1105/Makefile9
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h159
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c601
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c532
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.h43
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ethtool.c419
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c1675
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c590
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c987
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h253
-rw-r--r--drivers/net/dummy.c15
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c2
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c11
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c35
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c6
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/Makefile1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_common.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c125
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c41
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c121
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c48
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h7
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c188
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h34
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c56
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h37
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c36
-rw-r--r--drivers/net/ethernet/arc/emac_main.c2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c71
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h10
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c52
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c271
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h263
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c25
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c13
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c5
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c85
-rw-r--r--drivers/net/ethernet/cavium/Kconfig1
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c41
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c107
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c10
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/ethoc.c2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c88
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h25
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c97
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h49
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c953
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h40
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c207
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h36
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c110
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c1017
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c910
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h71
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c73
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c52
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c148
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c30
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c108
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h20
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c7
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h36
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c355
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c481
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devids.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c82
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c413
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c58
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h30
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c171
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c4
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h110
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c335
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c1392
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h179
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c551
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h61
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c366
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c768
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h13
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c824
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c524
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h48
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c706
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c720
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h28
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h68
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h17
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c839
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c451
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c11
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c17
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h52
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c704
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h138
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c253
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c28
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c104
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c89
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c280
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c130
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c403
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c224
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c257
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c113
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c474
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c182
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/transobj.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h92
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c158
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c388
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c107
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c29
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c2
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c541
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile4
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/ctrl.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c236
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/fw.h33
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c12
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h17
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.c220
-rw-r--r--drivers/net/ethernet/netronome/nfp/ccm.h81
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c203
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c58
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c155
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h103
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/metadata.c117
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c618
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c366
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_devlink.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h23
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c133
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h10
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c131
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c29
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c62
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h8
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c2
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h3
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c24
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c9
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c835
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c20
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c9
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c4
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c1
-rw-r--r--drivers/net/ethernet/sfc/tx.c12
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c46
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c3
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c3
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/ti/Kconfig19
-rw-r--r--drivers/net/ethernet/ti/Makefile9
-rw-r--r--drivers/net/ethernet/ti/cpmac.c14
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c9
-rw-r--r--drivers/net/ethernet/ti/cpsw.c1542
-rw-r--r--drivers/net/ethernet/ti/cpsw.h9
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c55
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h12
-rw-r--r--drivers/net/ethernet/ti/cpsw_ethtool.c719
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c132
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h429
-rw-r--r--drivers/net/ethernet/ti/cpsw_sl.c328
-rw-r--r--drivers/net/ethernet/ti/cpsw_sl.h73
-rw-r--r--drivers/net/ethernet/ti/cpts.c14
-rw-r--r--drivers/net/ethernet/ti/cpts.h14
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c37
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.h13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c32
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c45
-rw-r--r--drivers/net/ethernet/ti/netcp.h10
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c9
-rw-r--r--drivers/net/ethernet/ti/netcp_xgbepcsr.c9
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig5
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h26
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c529
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_mdio.c53
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c42
-rw-r--r--drivers/net/geneve.c3
-rw-r--r--drivers/net/gtp.c7
-rw-r--r--drivers/net/hippi/rrunner.c4
-rw-r--r--drivers/net/hyperv/netvsc.c17
-rw-r--r--drivers/net/hyperv/netvsc_drv.c10
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c31
-rw-r--r--drivers/net/loopback.c14
-rw-r--r--drivers/net/macsec.c78
-rw-r--r--drivers/net/macvlan.c50
-rw-r--r--drivers/net/net_failover.c8
-rw-r--r--drivers/net/netdevsim/Makefile6
-rw-r--r--drivers/net/netdevsim/bpf.c107
-rw-r--r--drivers/net/netdevsim/bus.c341
-rw-r--r--drivers/net/netdevsim/dev.c447
-rw-r--r--drivers/net/netdevsim/devlink.c295
-rw-r--r--drivers/net/netdevsim/fib.c102
-rw-r--r--drivers/net/netdevsim/ipsec.c3
-rw-r--r--drivers/net/netdevsim/netdev.c428
-rw-r--r--drivers/net/netdevsim/netdevsim.h145
-rw-r--r--drivers/net/netdevsim/sdev.c69
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c2
-rw-r--r--drivers/net/phy/aquantia_main.c526
-rw-r--r--drivers/net/phy/asix.c2
-rw-r--r--drivers/net/phy/at803x.c32
-rw-r--r--drivers/net/phy/bcm-cygnus.c149
-rw-r--r--drivers/net/phy/bcm-phy-lib.c52
-rw-r--r--drivers/net/phy/bcm-phy-lib.h20
-rw-r--r--drivers/net/phy/bcm63xx.c4
-rw-r--r--drivers/net/phy/bcm7xxx.c82
-rw-r--r--drivers/net/phy/broadcom.c34
-rw-r--r--drivers/net/phy/cicada.c4
-rw-r--r--drivers/net/phy/davicom.c8
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/dp83822.c2
-rw-r--r--drivers/net/phy/dp83848.c2
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/et1011c.c2
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/icplus.c6
-rw-r--r--drivers/net/phy/intel-xway.c20
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c142
-rw-r--r--drivers/net/phy/marvell10g.c15
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c7
-rw-r--r--drivers/net/phy/mdio-mux-meson-g12a.c380
-rw-r--r--drivers/net/phy/mdio_bus.c33
-rw-r--r--drivers/net/phy/mdio_device.c13
-rw-r--r--drivers/net/phy/meson-gxl.c19
-rw-r--r--drivers/net/phy/micrel.c72
-rw-r--r--drivers/net/phy/microchip.c2
-rw-r--r--drivers/net/phy/mscc.c479
-rw-r--r--drivers/net/phy/national.c2
-rw-r--r--drivers/net/phy/phy-c45.c37
-rw-r--r--drivers/net/phy/phy-core.c272
-rw-r--r--drivers/net/phy/phy.c47
-rw-r--r--drivers/net/phy/phy_device.c208
-rw-r--r--drivers/net/phy/qsemi.c2
-rw-r--r--drivers/net/phy/realtek.c26
-rw-r--r--drivers/net/phy/rockchip.c33
-rw-r--r--drivers/net/phy/smsc.c12
-rw-r--r--drivers/net/phy/ste10Xp.c4
-rw-r--r--drivers/net/phy/uPD60620.c2
-rw-r--r--drivers/net/phy/vitesse.c34
-rw-r--r--drivers/net/sb1000.c9
-rw-r--r--drivers/net/team/team.c34
-rw-r--r--drivers/net/tun.c23
-rw-r--r--drivers/net/usb/cdc_mbim.c1
-rw-r--r--drivers/net/usb/ipheth.c60
-rw-r--r--drivers/net/usb/qmi_wwan.c65
-rw-r--r--drivers/net/usb/r8152.c53
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/veth.c14
-rw-r--r--drivers/net/virtio_net.c21
-rw-r--r--drivers/net/vrf.c12
-rw-r--r--drivers/net/vxlan.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c1
-rw-r--r--drivers/net/wimax/i2400m/tx.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c39
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c38
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c78
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c35
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.c74
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx_edma.h47
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h11
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h91
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c4
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_lp.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c36
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c26
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c58
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c68
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c44
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c5
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-debug.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h181
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c605
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c91
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c232
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c10
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c29
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c10
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h69
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_event.c12
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c8
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile3
-rw-r--r--drivers/net/wireless/mediatek/mt76/agg-rx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c164
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c41
-rw-r--r--drivers/net/wireless/mediatek/mt76/mmio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h119
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/core.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c54
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/regs.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Kconfig7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c205
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c98
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c229
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c775
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h300
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c499
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c1655
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.h520
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h195
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c150
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h203
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/main.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c49
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h44
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c286
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c185
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c266
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_regs.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c188
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c107
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c148
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c379
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h25
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c86
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c379
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c91
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/debug.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c32
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h3
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c23
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c31
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h87
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c117
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800.h19
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c628
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c124
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800pci.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800soc.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c28
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00queue.h3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00usb.c22
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/realtek/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig54
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile22
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c637
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h52
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.c160
-rw-r--r--drivers/net/wireless/realtek/rtw88/efuse.h26
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c633
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h222
-rw-r--r--drivers/net/wireless/realtek/rtw88/hci.h211
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c965
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.h35
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c481
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c1211
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h1104
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c1211
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h237
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c1727
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h134
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c166
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.h20
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h421
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c391
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h67
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1594
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h170
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.c20783
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b_table.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1890
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h186
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c11753
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.h17
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.c151
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h41
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.c120
-rw-r--r--drivers/net/wireless/realtek/rtw88/sec.h39
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c367
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h89
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.c72
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h34
-rw-r--r--drivers/net/wireless/rndis_wlan.c12
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c199
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c30
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c232
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c129
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c96
-rw-r--r--drivers/net/wireless/rsi/rsi_boot_params.h63
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h44
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h21
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h26
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h5
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h3
-rw-r--r--drivers/net/wireless/st/cw1200/main.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c15
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/vendor_cmd.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c3
-rw-r--r--drivers/net/xen-netback/common.h18
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/xenbus.c17
-rw-r--r--drivers/net/xen-netfront.c5
-rw-r--r--drivers/nfc/st95hf/core.c4
-rw-r--r--drivers/of/of_net.c54
-rw-r--r--drivers/s390/net/qeth_core.h131
-rw-r--r--drivers/s390/net/qeth_core_main.c919
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_core_sys.c10
-rw-r--r--drivers/s390/net/qeth_ethtool.c17
-rw-r--r--drivers/s390/net/qeth_l2_main.c99
-rw-r--r--drivers/s390/net/qeth_l3_main.c263
-rw-r--r--drivers/s390/net/qeth_l3_sys.c26
-rw-r--r--drivers/ssb/bridge_pcmcia_80211.c9
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c3
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c3
-rw-r--r--drivers/target/target_core_user.c9
796 files changed, 93388 insertions, 14378 deletions
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 82532c299bb5..5278c57dce73 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -2826,8 +2826,8 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
case 0x6:
{
ia_cmds.status = 0;
- printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
- printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
+ printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
+ printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
}
break;
case 0x8:
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index f2471172a961..1cb5a0b85fd9 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -114,7 +114,7 @@ static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
if (!info || !info[0])
return 0;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
@@ -135,7 +135,7 @@ static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
int err = -EMSGSIZE;
int len;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
if (!nla)
return err;
@@ -3269,7 +3269,7 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb,
struct drbd_device *device)
{
struct nlattr *nla;
- nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
if (device &&
@@ -3837,7 +3837,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (err)
goto nla_put_failure;
- nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
+ nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
if (!nla)
goto nla_put_failure;
if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
diff --git a/drivers/block/drbd/drbd_nla.c b/drivers/block/drbd/drbd_nla.c
index 8e261cb5198b..6a09b0b98018 100644
--- a/drivers/block/drbd/drbd_nla.c
+++ b/drivers/block/drbd/drbd_nla.c
@@ -35,7 +35,8 @@ int drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla,
err = drbd_nla_check_mandatory(maxtype, nla);
if (!err)
- err = nla_parse_nested(tb, maxtype, nla, policy, NULL);
+ err = nla_parse_nested_deprecated(tb, maxtype, nla, policy,
+ NULL);
return err;
}
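[Editorial note, not part of the patch: the drbd hunks above are part of the tree-wide netlink API rename in this merge. As far as this series goes, nla_nest_start_noflag() keeps the old behaviour of nla_nest_start() (no NLA_F_NESTED flag on the container attribute), while the new nla_nest_start() sets NLA_F_NESTED, and nla_parse_nested_deprecated() parses with the old, liberal validation rules. Below is a minimal sketch of the converted pattern; FOO_ATTR_*, foo_attr_policy and the foo_* functions are made-up names used only for illustration.]

	/* Sketch only: dump side keeps the old wire format via _noflag */
	static int foo_fill_reply(struct sk_buff *skb, u32 index)
	{
		struct nlattr *nest;

		nest = nla_nest_start_noflag(skb, FOO_ATTR_ITEM);
		if (!nest)
			return -EMSGSIZE;

		if (nla_put_u32(skb, FOO_ATTR_INDEX, index)) {
			nla_nest_cancel(skb, nest);
			return -EMSGSIZE;
		}

		nla_nest_end(skb, nest);
		return 0;
	}

	/* Sketch only: parse side keeps liberal validation via _deprecated */
	static int foo_parse_item(const struct nlattr *attr,
				  struct netlink_ext_ack *extack)
	{
		struct nlattr *tb[FOO_ATTR_MAX + 1];
		int err;

		err = nla_parse_nested_deprecated(tb, FOO_ATTR_MAX, attr,
						  foo_attr_policy, extack);
		if (err)
			return err;

		if (!tb[FOO_ATTR_INDEX])
			return -EINVAL;
		return 0;
	}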
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 90ba9f4c03f3..053958a8a2ba 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -44,6 +44,9 @@
#include <linux/nbd-netlink.h>
#include <net/genetlink.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/nbd.h>
+
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static int nbd_total_devices = 0;
@@ -510,6 +513,10 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (sent) {
if (sent >= sizeof(request)) {
skip = sent - sizeof(request);
+
+ /* initialize handle for tracing purposes */
+ handle = nbd_cmd_handle(cmd);
+
goto send_pages;
}
iov_iter_advance(&from, sent);
@@ -526,11 +533,14 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
handle = nbd_cmd_handle(cmd);
memcpy(request.handle, &handle, sizeof(handle));
+ trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
+
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
+ trace_nbd_header_sent(req, handle);
if (result <= 0) {
if (was_interrupted(result)) {
/* If we havne't sent anything we can just return BUSY,
@@ -603,6 +613,7 @@ send_pages:
bio = next;
}
out:
+ trace_nbd_payload_sent(req, handle);
nsock->pending = NULL;
nsock->sent = 0;
return 0;
@@ -650,6 +661,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
tag, req);
return ERR_PTR(-ENOENT);
}
+ trace_nbd_header_received(req, handle);
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
@@ -703,6 +715,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
}
}
out:
+ trace_nbd_payload_received(req, handle);
mutex_unlock(&cmd->lock);
return ret ? ERR_PTR(ret) : cmd;
}
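[Editorial note: the trace_nbd_* calls above come from <trace/events/nbd.h>, presumably added elsewhere in this series (headers are outside the drivers/ diffstat shown here). The "#define CREATE_TRACE_POINTS" before that include, in the first nbd.c hunk, is what emits the actual tracepoint definitions in exactly one compilation unit. For readers unfamiliar with the mechanism, such a header generally follows the shape sketched below; the event and its fields here are a guess for illustration, not a copy of the real header, which defines nbd_send_request, nbd_header_sent, nbd_payload_sent, nbd_header_received and nbd_payload_received.]

	#undef TRACE_SYSTEM
	#define TRACE_SYSTEM nbd

	#if !defined(_TRACE_NBD_H) || defined(TRACE_HEADER_MULTI_READ)
	#define _TRACE_NBD_H

	#include <linux/tracepoint.h>

	/* Illustrative event definition only */
	TRACE_EVENT(nbd_header_sent,
		TP_PROTO(struct request *req, u64 handle),
		TP_ARGS(req, handle),

		TP_STRUCT__entry(
			__field(struct request *, req)
			__field(u64, handle)
		),

		TP_fast_assign(
			__entry->req = req;
			__entry->handle = handle;
		),

		TP_printk("request %p, handle 0x%016llx", __entry->req,
			  (unsigned long long)__entry->handle)
	);

	#endif /* _TRACE_NBD_H */

	/* This part must be outside the include guard */
	#include <trace/define_trace.h>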
@@ -1797,8 +1810,10 @@ again:
ret = -EINVAL;
goto out;
}
- ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
- nbd_sock_policy, info->extack);
+ ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
+ attr,
+ nbd_sock_policy,
+ info->extack);
if (ret != 0) {
printk(KERN_ERR "nbd: error processing sock list\n");
ret = -EINVAL;
@@ -1968,8 +1983,10 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
ret = -EINVAL;
goto out;
}
- ret = nla_parse_nested(socks, NBD_SOCK_MAX, attr,
- nbd_sock_policy, info->extack);
+ ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
+ attr,
+ nbd_sock_policy,
+ info->extack);
if (ret != 0) {
printk(KERN_ERR "nbd: error processing sock list\n");
ret = -EINVAL;
@@ -1999,22 +2016,22 @@ out:
static const struct genl_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_connect,
},
{
.cmd = NBD_CMD_DISCONNECT,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_disconnect,
},
{
.cmd = NBD_CMD_RECONFIGURE,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_reconfigure,
},
{
.cmd = NBD_CMD_STATUS,
- .policy = nbd_attr_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = nbd_genl_status,
},
};
@@ -2031,6 +2048,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.ops = nbd_connect_genl_ops,
.n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.maxattr = NBD_ATTR_MAX,
+ .policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
.n_mcgrps = ARRAY_SIZE(nbd_mcast_grps),
};
@@ -2050,7 +2068,7 @@ static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
*/
if (refcount_read(&nbd->config_refs))
connected = 1;
- dev_opt = nla_nest_start(reply, NBD_DEVICE_ITEM);
+ dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
if (!dev_opt)
return -EMSGSIZE;
ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
@@ -2098,7 +2116,7 @@ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- dev_list = nla_nest_start(reply, NBD_ATTR_DEVICE_LIST);
+ dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
if (index == -1) {
ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
if (ret) {
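[Editorial note: the last few nbd hunks show the pattern used throughout this merge for the strict netlink validation work (highlight 22): per-op .policy pointers are removed, a single policy is attached to the genl_family, and commands that still need the old lenient parsing opt out via GENL_DONT_VALIDATE_* flags. A condensed sketch with made-up foo_* names follows; new, strictly validated commands would simply leave .validate at zero.]

	/* Sketch only: family-level policy plus per-op validation opt-out */
	static const struct genl_ops foo_genl_ops[] = {
		{
			.cmd = FOO_CMD_GET,
			.validate = GENL_DONT_VALIDATE_STRICT |
				    GENL_DONT_VALIDATE_DUMP,
			.doit = foo_genl_get,
		},
	};

	static struct genl_family foo_genl_family __ro_after_init = {
		.name = "foo",
		.version = 1,
		.maxattr = FOO_ATTR_MAX,
		.policy = foo_attr_policy,	/* moved here from the ops */
		.module = THIS_MODULE,
		.ops = foo_genl_ops,
		.n_ops = ARRAY_SIZE(foo_genl_ops),
	};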
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 7b2e76e7f22f..b9c34ff9a0d3 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -336,7 +336,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8997.
+ Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8977/8987/8997.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
@@ -350,7 +350,7 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997
chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
@@ -379,6 +379,17 @@ config BT_WILINK
Say Y here to compile support for Texas Instrument's WiLink7 driver
into the kernel or say M to compile it as module (btwilink).
+config BT_MTKSDIO
+ tristate "MediaTek HCI SDIO driver"
+ depends on MMC
+ help
+ MediaTek Bluetooth HCI SDIO driver.
+ This driver is required if you want to use MediaTek Bluetooth
+ with SDIO interface.
+
+ Say Y here to compile support for MediaTek Bluetooth SDIO devices
+ into the kernel or say M to compile it as module (btmtksdio).
+
config BT_MTKUART
tristate "MediaTek HCI UART driver"
depends on SERIAL_DEV_BUS
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b7e393cfc1e3..34887b9b3a85 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
obj-$(CONFIG_BT_WILINK) += btwilink.o
+obj-$(CONFIG_BT_MTKSDIO) += btmtksdio.o
obj-$(CONFIG_BT_MTKUART) += btmtkuart.o
obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
obj-$(CONFIG_BT_BCM) += btbcm.o
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index d5d6e6e5da3b..8e17963ab65a 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -34,9 +34,11 @@
#define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}})
#define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}})
+#define BDADDR_BCM2076B1 (&(bdaddr_t) {{0x79, 0x56, 0x00, 0xa0, 0x76, 0x20}})
#define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}})
#define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}})
#define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}})
+#define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}})
int btbcm_check_bdaddr(struct hci_dev *hdev)
{
@@ -69,6 +71,9 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
* The address 20:70:02:A0:00:00 indicates a BCM20702A1 controller
* with no configured address.
*
+ * The address 20:76:A0:00:56:79 indicates a BCM2076B1 controller
+ * with no configured address.
+ *
* The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller
* with waiting for configuration state.
*
@@ -80,9 +85,11 @@ int btbcm_check_bdaddr(struct hci_dev *hdev)
*/
if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) ||
!bacmp(&bda->bdaddr, BDADDR_BCM20702A1) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM2076B1) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4324B3) ||
!bacmp(&bda->bdaddr, BDADDR_BCM4330B1) ||
- !bacmp(&bda->bdaddr, BDADDR_BCM43430A0)) {
+ !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) ||
+ !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) {
bt_dev_info(hdev, "BCM: Using default device address (%pMR)",
&bda->bdaddr);
set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks);
@@ -333,6 +340,7 @@ struct bcm_subver_table {
static const struct bcm_subver_table bcm_uart_subver_table[] = {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
+ { 0x4204, "BCM2076B1" }, /* 002.002.004 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
{ 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 047b75ce1deb..0f3a020703ab 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -235,6 +235,29 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = {
.fw_dump_end = 0xf8,
};
+static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
+ .cfg = 0x00,
+ .host_int_mask = 0x08,
+ .host_intstatus = 0x0c,
+ .card_status = 0x5c,
+ .sq_read_base_addr_a0 = 0xf8,
+ .sq_read_base_addr_a1 = 0xf9,
+ .card_revision = 0xc8,
+ .card_fw_status0 = 0xe8,
+ .card_fw_status1 = 0xe9,
+ .card_rx_len = 0xea,
+ .card_rx_unit = 0xeb,
+ .io_port_0 = 0xe4,
+ .io_port_1 = 0xe5,
+ .io_port_2 = 0xe6,
+ .int_read_to_clear = true,
+ .host_int_rsr = 0x04,
+ .card_misc_cfg = 0xd8,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+};
+
static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
.cfg = 0x00,
.host_int_mask = 0x08,
@@ -312,6 +335,15 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.supports_fw_dump = true,
};
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8987_uapsta.bin",
+ .reg = &btmrvl_reg_8987,
+ .support_pscan_win_report = true,
+ .sd_blksz_fw_dl = 256,
+ .supports_fw_dump = true,
+};
+
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sd8997_uapsta.bin",
@@ -343,6 +375,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8977 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9146),
.driver_data = (unsigned long)&btmrvl_sdio_sd8977 },
+ /* Marvell SD8987 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x914A),
+ .driver_data = (unsigned long)&btmrvl_sdio_sd8987 },
/* Marvell SD8997 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142),
.driver_data = (unsigned long)&btmrvl_sdio_sd8997 },
@@ -1797,4 +1832,5 @@ MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8977_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
new file mode 100644
index 000000000000..813338288453
--- /dev/null
+++ b/drivers/bluetooth/btmtksdio.c
@@ -0,0 +1,1101 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 MediaTek Inc.
+
+/*
+ * Bluetooth support for MediaTek SDIO devices
+ *
+ * This file is written based on btsdio.c and btmtkuart.c.
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <asm/unaligned.h>
+#include <linux/atomic.h>
+#include <linux/firmware.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "h4_recv.h"
+
+#define VERSION "0.1"
+
+#define FIRMWARE_MT7663 "mediatek/mt7663pr2h.bin"
+#define FIRMWARE_MT7668 "mediatek/mt7668pr2h.bin"
+
+#define MTKBTSDIO_AUTOSUSPEND_DELAY 8000
+
+static bool enable_autosuspend;
+
+struct btmtksdio_data {
+ const char *fwname;
+};
+
+static const struct btmtksdio_data mt7663_data = {
+ .fwname = FIRMWARE_MT7663,
+};
+
+static const struct btmtksdio_data mt7668_data = {
+ .fwname = FIRMWARE_MT7668,
+};
+
+static const struct sdio_device_id btmtksdio_table[] = {
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663),
+ .driver_data = (kernel_ulong_t)&mt7663_data },
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7668),
+ .driver_data = (kernel_ulong_t)&mt7668_data },
+ { } /* Terminating entry */
+};
+
+#define MTK_REG_CHLPCR 0x4 /* W1S */
+#define C_INT_EN_SET BIT(0)
+#define C_INT_EN_CLR BIT(1)
+#define C_FW_OWN_REQ_SET BIT(8) /* For write */
+#define C_COM_DRV_OWN BIT(8) /* For read */
+#define C_FW_OWN_REQ_CLR BIT(9)
+
+#define MTK_REG_CSDIOCSR 0x8
+#define SDIO_RE_INIT_EN BIT(0)
+#define SDIO_INT_CTL BIT(2)
+
+#define MTK_REG_CHCR 0xc
+#define C_INT_CLR_CTRL BIT(1)
+
+/* CHISR has the same bit field definitions as CHIER */
+#define MTK_REG_CHISR 0x10
+#define MTK_REG_CHIER 0x14
+#define FW_OWN_BACK_INT BIT(0)
+#define RX_DONE_INT BIT(1)
+#define TX_EMPTY BIT(2)
+#define TX_FIFO_OVERFLOW BIT(8)
+#define RX_PKT_LEN GENMASK(31, 16)
+
+#define MTK_REG_CTDR 0x18
+
+#define MTK_REG_CRDR 0x1c
+
+#define MTK_SDIO_BLOCK_SIZE 256
+
+#define BTMTKSDIO_TX_WAIT_VND_EVT 1
+
+enum {
+ MTK_WMT_PATCH_DWNLD = 0x1,
+ MTK_WMT_TEST = 0x2,
+ MTK_WMT_WAKEUP = 0x3,
+ MTK_WMT_HIF = 0x4,
+ MTK_WMT_FUNC_CTRL = 0x6,
+ MTK_WMT_RST = 0x7,
+ MTK_WMT_SEMAPHORE = 0x17,
+};
+
+enum {
+ BTMTK_WMT_INVALID,
+ BTMTK_WMT_PATCH_UNDONE,
+ BTMTK_WMT_PATCH_DONE,
+ BTMTK_WMT_ON_UNDONE,
+ BTMTK_WMT_ON_DONE,
+ BTMTK_WMT_ON_PROGRESS,
+};
+
+struct mtkbtsdio_hdr {
+ __le16 len;
+ __le16 reserved;
+ u8 bt_type;
+} __packed;
+
+struct mtk_wmt_hdr {
+ u8 dir;
+ u8 op;
+ __le16 dlen;
+ u8 flag;
+} __packed;
+
+struct mtk_hci_wmt_cmd {
+ struct mtk_wmt_hdr hdr;
+ u8 data[256];
+} __packed;
+
+struct btmtk_hci_wmt_evt {
+ struct hci_event_hdr hhdr;
+ struct mtk_wmt_hdr whdr;
+} __packed;
+
+struct btmtk_hci_wmt_evt_funcc {
+ struct btmtk_hci_wmt_evt hwhdr;
+ __be16 status;
+} __packed;
+
+struct btmtk_tci_sleep {
+ u8 mode;
+ __le16 duration;
+ __le16 host_duration;
+ u8 host_wakeup_pin;
+ u8 time_compensation;
+} __packed;
+
+struct btmtk_hci_wmt_params {
+ u8 op;
+ u8 flag;
+ u16 dlen;
+ const void *data;
+ u32 *status;
+};
+
+struct btmtksdio_dev {
+ struct hci_dev *hdev;
+ struct sdio_func *func;
+ struct device *dev;
+
+ struct work_struct tx_work;
+ unsigned long tx_state;
+ struct sk_buff_head txq;
+
+ struct sk_buff *evt_skb;
+
+ const struct btmtksdio_data *data;
+};
+
+static int mtk_hci_wmt_sync(struct hci_dev *hdev,
+ struct btmtk_hci_wmt_params *wmt_params)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
+ u32 hlen, status = BTMTK_WMT_INVALID;
+ struct btmtk_hci_wmt_evt *wmt_evt;
+ struct mtk_hci_wmt_cmd wc;
+ struct mtk_wmt_hdr *hdr;
+ int err;
+
+ hlen = sizeof(*hdr) + wmt_params->dlen;
+ if (hlen > 255)
+ return -EINVAL;
+
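+ /* Build the WMT command header; dir = 1 marks a command sent to the device */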
+ hdr = (struct mtk_wmt_hdr *)&wc;
+ hdr->dir = 1;
+ hdr->op = wmt_params->op;
+ hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
+ hdr->flag = wmt_params->flag;
+ memcpy(wc.data, wmt_params->data, wmt_params->dlen);
+
+ set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+
+ err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
+ if (err < 0) {
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ /* The vendor specific WMT commands are all answered by a vendor
+ * specific event and will not have the Command Status or Command
+ * Complete as with usual HCI command flow control.
+ *
+ * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT
+ * state to be cleared. The driver specific event receive routine
+ * will clear that state and with that indicate completion of the
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT,
+ TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+ if (err == -EINTR) {
+ bt_dev_err(hdev, "Execution of wmt command interrupted");
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return err;
+ }
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+ clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state);
+ return -ETIMEDOUT;
+ }
+
+ /* Parse and handle the return WMT event */
+ wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data;
+ if (wmt_evt->whdr.op != hdr->op) {
+ bt_dev_err(hdev, "Wrong op received %d expected %d",
+ wmt_evt->whdr.op, hdr->op);
+ err = -EIO;
+ goto err_free_skb;
+ }
+
+ switch (wmt_evt->whdr.op) {
+ case MTK_WMT_SEMAPHORE:
+ if (wmt_evt->whdr.flag == 2)
+ status = BTMTK_WMT_PATCH_UNDONE;
+ else
+ status = BTMTK_WMT_PATCH_DONE;
+ break;
+ case MTK_WMT_FUNC_CTRL:
+ wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt;
+ if (be16_to_cpu(wmt_evt_funcc->status) == 0x404)
+ status = BTMTK_WMT_ON_DONE;
+ else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420)
+ status = BTMTK_WMT_ON_PROGRESS;
+ else
+ status = BTMTK_WMT_ON_UNDONE;
+ break;
+ }
+
+ if (wmt_params->status)
+ *wmt_params->status = status;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+ return err;
+}
+
+static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev,
+ struct sk_buff *skb)
+{
+ struct mtkbtsdio_hdr *sdio_hdr;
+ int err;
+
+ /* Make sure that there is enough room for the SDIO header */
+ if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) {
+ err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0,
+ GFP_ATOMIC);
+ if (err < 0)
+ return err;
+ }
+
+ /* Prepend MediaTek SDIO Specific Header */
+ skb_push(skb, sizeof(*sdio_hdr));
+
+ sdio_hdr = (void *)skb->data;
+ sdio_hdr->len = cpu_to_le16(skb->len);
+ sdio_hdr->reserved = cpu_to_le16(0);
+ sdio_hdr->bt_type = hci_skb_pkt_type(skb);
+
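+ /* Writes to the Tx data register are padded up to the SDIO block size */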
+ err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data,
+ round_up(skb->len, MTK_SDIO_BLOCK_SIZE));
+ if (err < 0)
+ goto err_skb_pull;
+
+ bdev->hdev->stat.byte_tx += skb->len;
+
+ kfree_skb(skb);
+
+ return 0;
+
+err_skb_pull:
+ skb_pull(skb, sizeof(*sdio_hdr));
+
+ return err;
+}
+
+static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev)
+{
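+ /* CHLPCR reports, via C_COM_DRV_OWN, whether the driver currently owns the device */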
+ return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL);
+}
+
+static void btmtksdio_tx_work(struct work_struct *work)
+{
+ struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev,
+ tx_work);
+ struct sk_buff *skb;
+ int err;
+
+ pm_runtime_get_sync(bdev->dev);
+
+ sdio_claim_host(bdev->func);
+
+ while ((skb = skb_dequeue(&bdev->txq))) {
+ err = btmtksdio_tx_packet(bdev, skb);
+ if (err < 0) {
+ bdev->hdev->stat.err_tx++;
+ skb_queue_head(&bdev->txq, skb);
+ break;
+ }
+ }
+
+ sdio_release_host(bdev->func);
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
+
+static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct hci_event_hdr *hdr = (void *)skb->data;
+ int err;
+
+ /* Fix up the vendor event id with 0xff for vendor specific instead
+ * of 0xe4 so that events sent via the monitoring socket can be
+ * parsed properly.
+ */
+ if (hdr->evt == 0xe4)
+ hdr->evt = HCI_EV_VENDOR;
+
+ /* When someone is waiting for the WMT event, the skb is cloned and
+ * the event is processed from the clone.
+ */
+ if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) {
+ bdev->evt_skb = skb_clone(skb, GFP_KERNEL);
+ if (!bdev->evt_skb) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+ if (err < 0)
+ goto err_free_skb;
+
+ if (hdr->evt == HCI_EV_VENDOR) {
+ if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT,
+ &bdev->tx_state)) {
+ /* Barrier to sync with other CPUs */
+ smp_mb__after_atomic();
+ wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT);
+ }
+ }
+
+ return 0;
+
+err_free_skb:
+ kfree_skb(bdev->evt_skb);
+ bdev->evt_skb = NULL;
+
+err_out:
+ return err;
+}
+
+static const struct h4_recv_pkt mtk_recv_pkts[] = {
+ { H4_RECV_ACL, .recv = hci_recv_frame },
+ { H4_RECV_SCO, .recv = hci_recv_frame },
+ { H4_RECV_EVENT, .recv = btmtksdio_recv_event },
+};
+
+static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
+{
+ const struct h4_recv_pkt *pkts = mtk_recv_pkts;
+ int pkts_count = ARRAY_SIZE(mtk_recv_pkts);
+ struct mtkbtsdio_hdr *sdio_hdr;
+ int err, i, pad_size;
+ struct sk_buff *skb;
+ u16 dlen;
+
+ if (rx_size < sizeof(*sdio_hdr))
+ return -EILSEQ;
+
+ /* An SDIO packet contains exactly one Bluetooth packet */
+ skb = bt_skb_alloc(rx_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, rx_size);
+
+ err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size);
+ if (err < 0)
+ goto err_kfree_skb;
+
+ sdio_hdr = (void *)skb->data;
+
+ /* Default the error to -EILSEQ simply to keep the error path
+ * cleaner.
+ */
+ err = -EILSEQ;
+
+ if (rx_size != le16_to_cpu(sdio_hdr->len)) {
+ bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched ");
+ goto err_kfree_skb;
+ }
+
+ hci_skb_pkt_type(skb) = sdio_hdr->bt_type;
+
+ /* Remove MediaTek SDIO header */
+ skb_pull(skb, sizeof(*sdio_hdr));
+
+ /* We have to dig into the packet to get the payload size and, from
+ * that, the number of padding bytes at the tail; the padding must be
+ * removed before the packet is handed to the core layer.
+ */
+ for (i = 0; i < pkts_count; i++) {
+ if (sdio_hdr->bt_type == (&pkts[i])->type)
+ break;
+ }
+
+ if (i >= pkts_count) {
+ bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x",
+ sdio_hdr->bt_type);
+ goto err_kfree_skb;
+ }
+
+ /* Remaining bytes cannot hold a header */
+ if (skb->len < (&pkts[i])->hlen) {
+ bt_dev_err(bdev->hdev, "The size of bt header is mismatched");
+ goto err_kfree_skb;
+ }
+
+ switch ((&pkts[i])->lsize) {
+ case 1:
+ dlen = skb->data[(&pkts[i])->loff];
+ break;
+ case 2:
+ dlen = get_unaligned_le16(skb->data +
+ (&pkts[i])->loff);
+ break;
+ default:
+ goto err_kfree_skb;
+ }
+
+ pad_size = skb->len - (&pkts[i])->hlen - dlen;
+
+ /* Remaining bytes cannot hold a payload */
+ if (pad_size < 0) {
+ bt_dev_err(bdev->hdev, "The size of bt payload is mismatched");
+ goto err_kfree_skb;
+ }
+
+ /* Remove padding bytes */
+ skb_trim(skb, skb->len - pad_size);
+
+ /* Complete frame */
+ (&pkts[i])->recv(bdev->hdev, skb);
+
+ bdev->hdev->stat.byte_rx += rx_size;
+
+ return 0;
+
+err_kfree_skb:
+ kfree_skb(skb);
+
+ return err;
+}
+
+static void btmtksdio_interrupt(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ u32 int_status;
+ u16 rx_size;
+
+ /* The host must get ownership from the device before accessing any
+ * register. However, if the SDIO host is not released first, a
+ * deadlock can occur through a circular wait between the SDIO IRQ
+ * work and the runtime PM work. So explicitly release the SDIO host
+ * here and claim it again after the runtime PM work is done.
+ */
+ sdio_release_host(bdev->func);
+
+ pm_runtime_get_sync(bdev->dev);
+
+ sdio_claim_host(bdev->func);
+
+ /* Disable interrupt */
+ sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
+
+ int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
+
+ /* Ack the interrupt as soon as possible, before any operation on
+ * the hardware.
+ *
+ * Note that the status is not acked again during the operations
+ * below, to avoid a race between the host and the device: RX_DONE
+ * for the next packet could be acked by mistake, and then no further
+ * interrupt would be raised even though data is still pending in the
+ * hardware FIFO.
+ */
+ sdio_writel(func, int_status, MTK_REG_CHISR, NULL);
+
+ if (unlikely(!int_status))
+ bt_dev_err(bdev->hdev, "CHISR is 0");
+
+ if (int_status & FW_OWN_BACK_INT)
+ bt_dev_dbg(bdev->hdev, "Get fw own back");
+
+ if (int_status & TX_EMPTY)
+ schedule_work(&bdev->tx_work);
+ else if (unlikely(int_status & TX_FIFO_OVERFLOW))
+ bt_dev_warn(bdev->hdev, "Tx fifo overflow");
+
+ if (int_status & RX_DONE_INT) {
+ rx_size = (int_status & RX_PKT_LEN) >> 16;
+
+ if (btmtksdio_rx_packet(bdev, rx_size) < 0)
+ bdev->hdev->stat.err_rx++;
+ }
+
+ /* Enable interrupt */
+ sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
+
+ pm_runtime_mark_last_busy(bdev->dev);
+ pm_runtime_put_autosuspend(bdev->dev);
+}
+
+static int btmtksdio_open(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ int err;
+ u32 status;
+
+ sdio_claim_host(bdev->func);
+
+ err = sdio_enable_func(bdev->func);
+ if (err < 0)
+ goto err_release_host;
+
+ /* Get ownership from the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+ if (err < 0) {
+ bt_dev_err(bdev->hdev, "Cannot get ownership from device");
+ goto err_disable_func;
+ }
+
+ /* Disable interrupt & mask out all interrupt sources */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = sdio_claim_irq(bdev->func, btmtksdio_interrupt);
+ if (err < 0)
+ goto err_disable_func;
+
+ err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* SDIO CMD 5 allows the SDIO device to go back to the idle state,
+ * and a synchronous interrupt is supported in SDIO 4-bit mode.
+ */
+ sdio_writel(bdev->func, SDIO_INT_CTL | SDIO_RE_INIT_EN,
+ MTK_REG_CSDIOCSR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Setup write-1-clear for CHISR register */
+ sdio_writel(bdev->func, C_INT_CLR_CTRL, MTK_REG_CHCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Setup interrupt sources */
+ sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW,
+ MTK_REG_CHIER, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ /* Enable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto err_release_irq;
+
+ sdio_release_host(bdev->func);
+
+ return 0;
+
+err_release_irq:
+ sdio_release_irq(bdev->func);
+
+err_disable_func:
+ sdio_disable_func(bdev->func);
+
+err_release_host:
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static int btmtksdio_close(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ u32 status;
+ int err;
+
+ sdio_claim_host(bdev->func);
+
+ /* Disable interrupt */
+ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
+
+ sdio_release_irq(bdev->func);
+
+ /* Return ownership to the device */
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, NULL);
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+ if (err < 0)
+ bt_dev_err(bdev->hdev, "Cannot return ownership to device");
+
+ sdio_disable_func(bdev->func);
+
+ sdio_release_host(bdev->func);
+
+ return 0;
+}
+
+static int btmtksdio_flush(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ skb_queue_purge(&bdev->txq);
+
+ cancel_work_sync(&bdev->tx_work);
+
+ return 0;
+}
+
+static int btmtksdio_func_query(struct hci_dev *hdev)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ int status, err;
+ u8 param = 0;
+
+ /* Query whether the function is enabled */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 4;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query function status (%d)", err);
+ return err;
+ }
+
+ return status;
+}
+
+static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
+{
+ struct btmtk_hci_wmt_params wmt_params;
+ const struct firmware *fw;
+ const u8 *fw_ptr;
+ size_t fw_size;
+ int err, dlen;
+ u8 flag;
+
+ err = request_firmware(&fw, fwname, &hdev->dev);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to load firmware file (%d)", err);
+ return err;
+ }
+
+ fw_ptr = fw->data;
+ fw_size = fw->size;
+
+ /* The patch header is 30 bytes long and should be skipped */
+ if (fw_size < 30) {
+ err = -EINVAL;
+ goto free_fw;
+ }
+
+ fw_size -= 30;
+ fw_ptr += 30;
+ flag = 1;
+
+ wmt_params.op = MTK_WMT_PATCH_DWNLD;
+ wmt_params.status = NULL;
+
+ while (fw_size > 0) {
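+ /* Each fragment carries at most 250 bytes of patch data so that the
+ * WMT header plus payload stays within the 255-byte command limit.
+ */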
+ dlen = min_t(int, 250, fw_size);
+
+ /* Tell the device the position in the sequence */
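+ /* (flag 1: first fragment, flag 2: intermediate, flag 3: last fragment) */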
+ if (fw_size - dlen <= 0)
+ flag = 3;
+ else if (fw_size < fw->size - 30)
+ flag = 2;
+
+ wmt_params.flag = flag;
+ wmt_params.dlen = dlen;
+ wmt_params.data = fw_ptr;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)",
+ err);
+ goto free_fw;
+ }
+
+ fw_size -= dlen;
+ fw_ptr += dlen;
+ }
+
+ wmt_params.op = MTK_WMT_RST;
+ wmt_params.flag = 4;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = NULL;
+
+ /* Activate the function the firmware provides */
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt rst (%d)", err);
+ goto free_fw;
+ }
+
+ /* Wait a short while for the firmware activation to complete */
+ usleep_range(10000, 12000);
+
+free_fw:
+ release_firmware(fw);
+ return err;
+}
+
+static int btmtksdio_setup(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ ktime_t calltime, delta, rettime;
+ struct btmtk_tci_sleep tci_sleep;
+ unsigned long long duration;
+ struct sk_buff *skb;
+ int err, status;
+ u8 param = 0x1;
+
+ calltime = ktime_get();
+
+ /* Query whether the firmware has already been downloaded */
+ wmt_params.op = MTK_WMT_SEMAPHORE;
+ wmt_params.flag = 1;
+ wmt_params.dlen = 0;
+ wmt_params.data = NULL;
+ wmt_params.status = &status;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
+ return err;
+ }
+
+ if (status == BTMTK_WMT_PATCH_DONE) {
+ bt_dev_info(hdev, "Firmware already downloaded");
+ goto ignore_setup_fw;
+ }
+
+ /* Set up the firmware that the device requires */
+ err = mtk_setup_firmware(hdev, bdev->data->fwname);
+ if (err < 0)
+ return err;
+
+ignore_setup_fw:
+ /* Query whether the device is already enabled */
+ err = readx_poll_timeout(btmtksdio_func_query, hdev, status,
+ status < 0 || status != BTMTK_WMT_ON_PROGRESS,
+ 2000, 5000000);
+ /* err is -ETIMEDOUT if the poll above timed out */
+ if (err < 0)
+ return err;
+
+ /* Other errors are returned by btmtksdio_func_query */
+ if (status < 0)
+ return status;
+
+ if (status == BTMTK_WMT_ON_DONE) {
+ bt_dev_info(hdev, "function already on");
+ goto ignore_func_on;
+ }
+
+ /* Enable Bluetooth protocol */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ignore_func_on:
+ /* Apply the low power environment setup */
+ tci_sleep.mode = 0x5;
+ tci_sleep.duration = cpu_to_le16(0x640);
+ tci_sleep.host_duration = cpu_to_le16(0x640);
+ tci_sleep.host_wakeup_pin = 0;
+ tci_sleep.time_compensation = 0;
+
+ skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep,
+ HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ bt_dev_err(hdev, "Failed to apply low power setting (%d)", err);
+ return err;
+ }
+ kfree_skb(skb);
+
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
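+ /* ns >> 10 is a cheap approximation of dividing by 1000 (microseconds) */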
+ duration = (unsigned long long)ktime_to_ns(delta) >> 10;
+
+ pm_runtime_set_autosuspend_delay(bdev->dev,
+ MTKBTSDIO_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(bdev->dev);
+
+ err = pm_runtime_set_active(bdev->dev);
+ if (err < 0)
+ return err;
+
+ /* Runtime autosuspend is forbidden by default; it can be allowed by
+ * the enable_autosuspend module parameter or the runtime PM entry
+ * under sysfs.
+ */
+ pm_runtime_forbid(bdev->dev);
+ pm_runtime_enable(bdev->dev);
+
+ if (enable_autosuspend)
+ pm_runtime_allow(bdev->dev);
+
+ bt_dev_info(hdev, "Device setup in %llu usecs", duration);
+
+ return 0;
+}
+
+static int btmtksdio_shutdown(struct hci_dev *hdev)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+ struct btmtk_hci_wmt_params wmt_params;
+ u8 param = 0x0;
+ int err;
+
+ /* Get the runtime PM state back in line with the state left by
+ * btmtksdio_setup.
+ */
+ pm_runtime_get_sync(bdev->dev);
+
+ /* Disable the device */
+ wmt_params.op = MTK_WMT_FUNC_CTRL;
+ wmt_params.flag = 0;
+ wmt_params.dlen = sizeof(param);
+ wmt_params.data = &param;
+ wmt_params.status = NULL;
+
+ err = mtk_hci_wmt_sync(hdev, &wmt_params);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
+ return err;
+ }
+
+ pm_runtime_put_noidle(bdev->dev);
+ pm_runtime_disable(bdev->dev);
+
+ return 0;
+}
+
+static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btmtksdio_dev *bdev = hci_get_drvdata(hdev);
+
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_COMMAND_PKT:
+ hdev->stat.cmd_tx++;
+ break;
+
+ case HCI_ACLDATA_PKT:
+ hdev->stat.acl_tx++;
+ break;
+
+ case HCI_SCODATA_PKT:
+ hdev->stat.sco_tx++;
+ break;
+
+ default:
+ return -EILSEQ;
+ }
+
+ skb_queue_tail(&bdev->txq, skb);
+
+ schedule_work(&bdev->tx_work);
+
+ return 0;
+}
+
+static int btmtksdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct btmtksdio_dev *bdev;
+ struct hci_dev *hdev;
+ int err;
+
+ bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL);
+ if (!bdev)
+ return -ENOMEM;
+
+ bdev->data = (void *)id->driver_data;
+ if (!bdev->data)
+ return -ENODEV;
+
+ bdev->dev = &func->dev;
+ bdev->func = func;
+
+ INIT_WORK(&bdev->tx_work, btmtksdio_tx_work);
+ skb_queue_head_init(&bdev->txq);
+
+ /* Initialize and register HCI device */
+ hdev = hci_alloc_dev();
+ if (!hdev) {
+ dev_err(&func->dev, "Can't allocate HCI device\n");
+ return -ENOMEM;
+ }
+
+ bdev->hdev = hdev;
+
+ hdev->bus = HCI_SDIO;
+ hci_set_drvdata(hdev, bdev);
+
+ hdev->open = btmtksdio_open;
+ hdev->close = btmtksdio_close;
+ hdev->flush = btmtksdio_flush;
+ hdev->setup = btmtksdio_setup;
+ hdev->shutdown = btmtksdio_shutdown;
+ hdev->send = btmtksdio_send_frame;
+ SET_HCIDEV_DEV(hdev, &func->dev);
+
+ hdev->manufacturer = 70;
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
+
+ err = hci_register_dev(hdev);
+ if (err < 0) {
+ dev_err(&func->dev, "Can't register HCI device\n");
+ hci_free_dev(hdev);
+ return err;
+ }
+
+ sdio_set_drvdata(func, bdev);
+
+ /* pm_runtime_enable is only done after the firmware has been
+ * downloaded, because the core layer may already have enabled
+ * runtime PM for this func, such as the case where host->caps has
+ * MMC_CAP_POWER_OFF_CARD set.
+ */
+ if (pm_runtime_enabled(bdev->dev))
+ pm_runtime_disable(bdev->dev);
+
+ /* As the explanation in drivers/mmc/core/sdio_bus.c tells us:
+ * Unbound SDIO functions are always suspended.
+ * During probe, the function is set active and the usage count
+ * is incremented. If the driver supports runtime PM,
+ * it should call pm_runtime_put_noidle() in its probe routine and
+ * pm_runtime_get_noresume() in its remove routine.
+ *
+ * So, call pm_runtime_put_noidle() here.
+ */
+ pm_runtime_put_noidle(bdev->dev);
+
+ return 0;
+}
+
+static void btmtksdio_remove(struct sdio_func *func)
+{
+ struct btmtksdio_dev *bdev = sdio_get_drvdata(func);
+ struct hci_dev *hdev;
+
+ if (!bdev)
+ return;
+
+ /* Be consistent with the state in btmtksdio_probe */
+ pm_runtime_get_noresume(bdev->dev);
+
+ hdev = bdev->hdev;
+
+ sdio_set_drvdata(func, NULL);
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+#ifdef CONFIG_PM
+static int btmtksdio_runtime_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+ u32 status;
+ int err;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ !(status & C_COM_DRV_OWN), 2000, 1000000);
+out:
+ bt_dev_info(bdev->hdev, "status (%d) return ownership to device", err);
+
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static int btmtksdio_runtime_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct btmtksdio_dev *bdev;
+ u32 status;
+ int err;
+
+ bdev = sdio_get_drvdata(func);
+ if (!bdev)
+ return 0;
+
+ sdio_claim_host(bdev->func);
+
+ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err);
+ if (err < 0)
+ goto out;
+
+ err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status,
+ status & C_COM_DRV_OWN, 2000, 1000000);
+out:
+ bt_dev_info(bdev->hdev, "status (%d) get ownership from device", err);
+
+ sdio_release_host(bdev->func);
+
+ return err;
+}
+
+static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend,
+ btmtksdio_runtime_resume, NULL);
+#define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops)
+#else /* CONFIG_PM */
+#define BTMTKSDIO_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+static struct sdio_driver btmtksdio_driver = {
+ .name = "btmtksdio",
+ .probe = btmtksdio_probe,
+ .remove = btmtksdio_remove,
+ .id_table = btmtksdio_table,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = BTMTKSDIO_PM_OPS,
+ }
+};
+
+module_sdio_driver(btmtksdio_driver);
+
+module_param(enable_autosuspend, bool, 0644);
+MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default");
+
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(FIRMWARE_MT7663);
+MODULE_FIRMWARE(FIRMWARE_MT7668);
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index b0b680dd69f4..f5dbeec8e274 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -661,7 +661,7 @@ static int btmtkuart_change_baudrate(struct hci_dev *hdev)
{
struct btmtkuart_dev *bdev = hci_get_drvdata(hdev);
struct btmtk_hci_wmt_params wmt_params;
- u32 baudrate;
+ __le32 baudrate;
u8 param;
int err;
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 612268574fc7..cc12eecd9e4d 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -336,7 +336,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
{
struct rome_config config;
int err;
- u8 rom_ver;
+ u8 rom_ver = 0;
bt_dev_dbg(hdev, "QCA setup on UART");
@@ -344,7 +344,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download rampatch file */
config.type = TLV_TYPE_PATCH;
- if (soc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(soc_type)) {
/* Firmware files to download are based on ROM version.
* ROM version is derived from last two bytes of soc_ver.
*/
@@ -365,7 +365,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
/* Download NVM configuration */
config.type = TLV_TYPE_NVM;
- if (soc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
snprintf(config.fwname, sizeof(config.fwname),
"qca/crnv%02x.bin", rom_ver);
else
@@ -410,6 +410,7 @@ int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(qca_set_bdaddr);
+
MODULE_AUTHOR("Ben Young Tae Kim <ytkim@qca.qualcomm.com>");
MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h
index c72c56ea7480..4c4fe2b5b7b7 100644
--- a/drivers/bluetooth/btqca.h
+++ b/drivers/bluetooth/btqca.h
@@ -41,7 +41,7 @@
#define QCA_WCN3990_POWERON_PULSE 0xFC
#define QCA_WCN3990_POWEROFF_PULSE 0xC0
-enum qca_bardrate {
+enum qca_baudrate {
QCA_BAUDRATE_115200 = 0,
QCA_BAUDRATE_57600,
QCA_BAUDRATE_38400,
@@ -132,7 +132,8 @@ enum qca_btsoc_type {
QCA_INVALID = -1,
QCA_AR3002,
QCA_ROME,
- QCA_WCN3990
+ QCA_WCN3990,
+ QCA_WCN3998,
};
#if IS_ENABLED(CONFIG_BT_QCA)
@@ -142,6 +143,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
enum qca_btsoc_type soc_type, u32 soc_ver);
int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+{
+ return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
+}
#else
static inline int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
@@ -165,4 +170,8 @@ static inline int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return -EOPNOTSUPP;
}
+static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
+{
+ return false;
+}
#endif
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 282d1af1d3ba..4cfa9abe03c8 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -376,20 +376,7 @@ static struct sdio_driver btsdio_driver = {
.id_table = btsdio_table,
};
-static int __init btsdio_init(void)
-{
- BT_INFO("Generic Bluetooth SDIO driver ver %s", VERSION);
-
- return sdio_register_driver(&btsdio_driver);
-}
-
-static void __exit btsdio_exit(void)
-{
- sdio_unregister_driver(&btsdio_driver);
-}
-
-module_init(btsdio_init);
-module_exit(btsdio_exit);
+module_sdio_driver(btsdio_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ddbe518c3e5b..b5d31d583d60 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -228,9 +228,15 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
int err;
if (powered && !dev->res_enabled) {
- err = regulator_bulk_enable(BCM_NUM_SUPPLIES, dev->supplies);
- if (err)
- return err;
+ /* Intel Macs use bcm_apple_get_resources() and don't
+ * have regulator supplies configured.
+ */
+ if (dev->supplies[0].supply) {
+ err = regulator_bulk_enable(BCM_NUM_SUPPLIES,
+ dev->supplies);
+ if (err)
+ return err;
+ }
/* LPO clock needs to be 32.768 kHz */
err = clk_set_rate(dev->lpo_clk, 32768);
@@ -259,7 +265,13 @@ static int bcm_gpio_set_power(struct bcm_device *dev, bool powered)
if (!powered && dev->res_enabled) {
clk_disable_unprepare(dev->txco_clk);
clk_disable_unprepare(dev->lpo_clk);
- regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies);
+
+ /* Intel Macs use bcm_apple_get_resources() and don't
+ * have regulator supplies configured.
+ */
+ if (dev->supplies[0].supply)
+ regulator_bulk_disable(BCM_NUM_SUPPLIES,
+ dev->supplies);
}
/* wait for device to power on and come out of reset */
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 069d1c8fde73..3f02ae560120 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -536,7 +536,7 @@ static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
skb_put_data(h5->rx_skb, byte, 1);
h5->rx_pending--;
- BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
+ BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}
static void h5_reset_rx(struct h5 *h5)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 237aea34b69f..57322c42bb2d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -54,9 +54,6 @@
#define HCI_IBS_WAKE_ACK 0xFC
#define HCI_MAX_IBS_SIZE 10
-/* Controller states */
-#define STATE_IN_BAND_SLEEP_ENABLED 1
-
#define IBS_WAKE_RETRANS_TIMEOUT_MS 100
#define IBS_TX_IDLE_TIMEOUT_MS 2000
#define CMD_TRANS_TIMEOUT_MS 100
@@ -67,6 +64,10 @@
/* Controller debug log header */
#define QCA_DEBUG_HANDLE 0x2EDC
+enum qca_flags {
+ QCA_IBS_ENABLED,
+};
+
/* HCI_IBS transmit side sleep protocol states */
enum tx_ibs_states {
HCI_IBS_TX_ASLEEP,
@@ -174,6 +175,21 @@ static int qca_power_setup(struct hci_uart *hu, bool on);
static void qca_power_shutdown(struct hci_uart *hu);
static int qca_power_off(struct hci_dev *hdev);
+static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
+{
+ enum qca_btsoc_type soc_type;
+
+ if (hu->serdev) {
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
+
+ soc_type = qsd->btsoc_type;
+ } else {
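+ /* Without serdev (line discipline mode), default to a ROME controller */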
+ soc_type = QCA_ROME;
+ }
+
+ return soc_type;
+}
+
static void __serial_clock_on(struct tty_struct *tty)
{
/* TODO: Some chipset requires to enable UART clock on client
@@ -506,8 +522,10 @@ static int qca_open(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type != QCA_WCN3990) {
+ if (!qca_is_wcn399x(qcadev->btsoc_type)) {
gpiod_set_value_cansleep(qcadev->bt_en, 1);
+ /* Controller needs time to bootup. */
+ msleep(150);
} else {
hu->init_speed = qcadev->init_speed;
hu->oper_speed = qcadev->oper_speed;
@@ -612,7 +630,7 @@ static int qca_close(struct hci_uart *hu)
if (hu->serdev) {
qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(hu);
else
gpiod_set_value_cansleep(qcadev->bt_en, 0);
@@ -775,7 +793,7 @@ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb)
/* Don't go to sleep in middle of patch download or
* Out-Of-Band(GPIOs control) sleep is selected.
*/
- if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags)) {
skb_queue_tail(&qca->txq, skb);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
return 0;
@@ -963,7 +981,6 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
struct qca_data *qca = hu->priv;
- struct qca_serdev *qcadev;
struct sk_buff *skb;
u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
@@ -985,18 +1002,17 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
skb_queue_tail(&qca->txq, skb);
hci_uart_tx_wakeup(hu);
- qcadev = serdev_device_get_drvdata(hu->serdev);
-
/* Wait for the baudrate change request to be sent */
while (!skb_queue_empty(&qca->txq))
usleep_range(100, 200);
- serdev_device_wait_until_sent(hu->serdev,
+ if (hu->serdev)
+ serdev_device_wait_until_sent(hu->serdev,
msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
/* Give the controller time to process the request */
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qca_soc_type(hu)))
msleep(10);
else
msleep(300);
@@ -1072,10 +1088,7 @@ static unsigned int qca_get_speed(struct hci_uart *hu,
static int qca_check_speeds(struct hci_uart *hu)
{
- struct qca_serdev *qcadev;
-
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(qca_soc_type(hu))) {
if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
!qca_get_speed(hu, QCA_OPER_SPEED))
return -EINVAL;
@@ -1091,7 +1104,6 @@ static int qca_check_speeds(struct hci_uart *hu)
static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
{
unsigned int speed, qca_baudrate;
- struct qca_serdev *qcadev;
int ret = 0;
if (speed_type == QCA_INIT_SPEED) {
@@ -1099,6 +1111,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
if (speed)
host_set_baudrate(hu, speed);
} else {
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
+
speed = qca_get_speed(hu, QCA_OPER_SPEED);
if (!speed)
return 0;
@@ -1106,8 +1120,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
/* Disable flow control for wcn3990 to deassert RTS while
* changing the baudrate of chip and host.
*/
- qcadev = serdev_device_get_drvdata(hu->serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, true);
qca_baudrate = qca_get_baudrate_value(speed);
@@ -1119,7 +1132,7 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
host_set_baudrate(hu, speed);
error:
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hci_uart_set_flow_control(hu, false);
}
@@ -1181,20 +1194,18 @@ static int qca_setup(struct hci_uart *hu)
struct hci_dev *hdev = hu->hdev;
struct qca_data *qca = hu->priv;
unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
- struct qca_serdev *qcadev;
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
int ret;
int soc_ver = 0;
- qcadev = serdev_device_get_drvdata(hu->serdev);
-
ret = qca_check_speeds(hu);
if (ret)
return ret;
/* Patch downloading has to be done without IBS mode */
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
- if (qcadev->btsoc_type == QCA_WCN3990) {
+ if (qca_is_wcn399x(soc_type)) {
bt_dev_info(hdev, "setting up wcn3990");
/* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
@@ -1225,7 +1236,7 @@ static int qca_setup(struct hci_uart *hu)
qca_baudrate = qca_get_baudrate_value(speed);
}
- if (qcadev->btsoc_type != QCA_WCN3990) {
+ if (!qca_is_wcn399x(soc_type)) {
/* Get QCA version information */
ret = qca_read_soc_version(hdev, &soc_ver);
if (ret)
@@ -1234,9 +1245,9 @@ static int qca_setup(struct hci_uart *hu)
bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
/* Setup patch / NVM configurations */
- ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
+ ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver);
if (!ret) {
- set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ set_bit(QCA_IBS_ENABLED, &qca->flags);
qca_debugfs_init(hdev);
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
@@ -1250,7 +1261,7 @@ static int qca_setup(struct hci_uart *hu)
}
/* Setup bdaddr */
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(soc_type))
hu->hdev->set_bdaddr = qca_set_bdaddr;
else
hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
@@ -1273,7 +1284,7 @@ static struct hci_uart_proto qca_proto = {
.dequeue = qca_dequeue,
};
-static const struct qca_vreg_data qca_soc_data = {
+static const struct qca_vreg_data qca_soc_data_wcn3990 = {
.soc_type = QCA_WCN3990,
.vregs = (struct qca_vreg []) {
{ "vddio", 1800000, 1900000, 15000 },
@@ -1284,6 +1295,17 @@ static const struct qca_vreg_data qca_soc_data = {
.num_vregs = 4,
};
+static const struct qca_vreg_data qca_soc_data_wcn3998 = {
+ .soc_type = QCA_WCN3998,
+ .vregs = (struct qca_vreg []) {
+ { "vddio", 1800000, 1900000, 10000 },
+ { "vddxo", 1800000, 1900000, 80000 },
+ { "vddrf", 1300000, 1352000, 300000 },
+ { "vddch0", 3300000, 3300000, 450000 },
+ },
+ .num_vregs = 4,
+};
+
static void qca_power_shutdown(struct hci_uart *hu)
{
struct qca_data *qca = hu->priv;
@@ -1294,7 +1316,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
* data in skb's.
*/
spin_lock_irqsave(&qca->hci_ibs_lock, flags);
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
qca_flush(hu);
spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
@@ -1417,8 +1439,8 @@ static int qca_serdev_probe(struct serdev_device *serdev)
qcadev->serdev_hu.serdev = serdev;
data = of_device_get_match_data(&serdev->dev);
serdev_device_set_drvdata(serdev, qcadev);
- if (data && data->soc_type == QCA_WCN3990) {
- qcadev->btsoc_type = QCA_WCN3990;
+ if (data && qca_is_wcn399x(data->soc_type)) {
+ qcadev->btsoc_type = data->soc_type;
qcadev->bt_power = devm_kzalloc(&serdev->dev,
sizeof(struct qca_power),
GFP_KERNEL);
@@ -1482,7 +1504,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
- if (qcadev->btsoc_type == QCA_WCN3990)
+ if (qca_is_wcn399x(qcadev->btsoc_type))
qca_power_shutdown(&qcadev->serdev_hu);
else
clk_disable_unprepare(qcadev->susclk);
@@ -1492,7 +1514,8 @@ static void qca_serdev_remove(struct serdev_device *serdev)
static const struct of_device_id qca_bluetooth_of_match[] = {
{ .compatible = "qcom,qca6174-bt" },
- { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
+ { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 0dce94e3c495..744b6ec0acb0 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -42,7 +42,7 @@
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
-#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_sa.h>
@@ -86,8 +86,8 @@ static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return false;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_addr_policy, NULL);
if (ret)
return false;
@@ -351,7 +351,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
if (family == AF_INET) {
rt = container_of(dst, struct rtable, dst);
- return rt->rt_uses_gateway;
+ return rt->rt_gw_family == AF_INET;
}
rt6 = container_of(dst, struct rt6_info, dst);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index a5d2a20ee697..41929bb83739 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -506,14 +506,14 @@ int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
int ret;
const char *err_str = "";
- ret = nlmsg_validate(cb->nlh, nlh_len, policy_max - 1, nlmsg_policy,
- NULL);
+ ret = nlmsg_validate_deprecated(cb->nlh, nlh_len, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Invalid attribute";
goto parse_nlmsg_error;
}
- ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max - 1,
- nlmsg_policy, NULL);
+ ret = nlmsg_parse_deprecated(cb->nlh, nlh_len, nltb, policy_max - 1,
+ nlmsg_policy, NULL);
if (ret) {
err_str = "Unable to parse the nlmsg";
goto parse_nlmsg_error;
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 11ed58d3fce5..85324012bf07 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -292,7 +292,8 @@ static int fill_res_info_entry(struct sk_buff *msg,
{
struct nlattr *entry_attr;
- entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
+ entry_attr = nla_nest_start_noflag(msg,
+ RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
if (!entry_attr)
return -EMSGSIZE;
@@ -327,7 +328,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
if (fill_nldev_handle(msg, device))
return -EMSGSIZE;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY);
if (!table_attr)
return -EMSGSIZE;
@@ -607,8 +608,8 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -652,8 +653,8 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
- extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -721,8 +722,8 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 port;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err ||
!tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
!tb[RDMA_NLDEV_ATTR_PORT_INDEX])
@@ -777,8 +778,8 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
int err;
unsigned int p;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -832,8 +833,8 @@ static int nldev_res_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
@@ -981,8 +982,8 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct sk_buff *msg;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ ret = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || !fe->id || !tb[fe->id])
return -EINVAL;
@@ -1070,8 +1071,8 @@ static int res_get_common_dumpit(struct sk_buff *skb,
u32 index, port = 0;
bool filled = false;
- err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, NULL);
+ err = nlmsg_parse_deprecated(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, NULL);
/*
* Right now, we are expecting the device index to get res information,
* but it is possible to extend this code to return all devices in
@@ -1108,7 +1109,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
goto err;
}
- table_attr = nla_nest_start(skb, fe->nldev_attr);
+ table_attr = nla_nest_start_noflag(skb, fe->nldev_attr);
if (!table_attr) {
ret = -EMSGSIZE;
goto err;
@@ -1134,7 +1135,7 @@ static int res_get_common_dumpit(struct sk_buff *skb,
filled = true;
- entry_attr = nla_nest_start(skb, fe->entry);
+ entry_attr = nla_nest_start_noflag(skb, fe->entry);
if (!entry_attr) {
ret = -EMSGSIZE;
rdma_restrack_put(res);
@@ -1249,8 +1250,8 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
char type[IFNAMSIZ];
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_NAME] ||
!tb[RDMA_NLDEV_ATTR_LINK_TYPE] || !tb[RDMA_NLDEV_ATTR_NDEV_NAME])
return -EINVAL;
@@ -1293,8 +1294,8 @@ static int nldev_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
u32 index;
int err;
- err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
+ err = nlmsg_parse_deprecated(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
return -EINVAL;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7925e45ea88a..bb534959abf0 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1028,8 +1028,8 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb,
!(NETLINK_CB(skb).sk))
return -EPERM;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
if (ret || !attr)
goto settimeout_out;
@@ -1080,8 +1080,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
return 0;
- ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
- nlmsg_len(nlh), ib_nl_policy, NULL);
+ ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_policy, NULL);
if (ret)
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
index 9a7520ee41e0..f82d46ed969d 100644
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -149,7 +149,7 @@ static int fill_res_qp_entry(struct sk_buff *msg,
if (qhp->ucontext)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
@@ -216,7 +216,7 @@ static int fill_res_ep_entry(struct sk_buff *msg,
if (!uep)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err_free_uep;
@@ -387,7 +387,7 @@ static int fill_res_cq_entry(struct sk_buff *msg,
if (ibcq->uobject)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
@@ -447,7 +447,7 @@ static int fill_res_mr_entry(struct sk_buff *msg,
if (!stag)
return 0;
- table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
if (!table_attr)
goto err;
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index a922db58be14..2b07032dbdda 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -423,8 +423,7 @@ tx_finish:
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index 6bcc63aaa50b..be95ac5aeb30 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -148,7 +148,7 @@ int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
return ret;
}
- *addr = pci_resource_start(dev->pdev, 0) +
+ *addr = dev->bar_addr +
MLX5_GET64(alloc_memic_out, out, memic_start_addr);
return 0;
@@ -167,7 +167,7 @@ int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length)
u64 start_page_idx;
int err;
- addr -= pci_resource_start(dev->pdev, 0);
+ addr -= dev->bar_addr;
start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;
MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d3dd290ae1b1..1aaa2056d188 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -181,7 +181,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
ibdev->rep->vport);
if (rep_ndev == ndev)
roce->netdev = ndev;
- } else if (ndev->dev.parent == &mdev->pdev->dev) {
+ } else if (ndev->dev.parent == mdev->device) {
roce->netdev = ndev;
}
write_unlock(&roce->netdev_lock);
@@ -2011,7 +2011,7 @@ static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1;
- return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
+ return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page;
}
static int get_command(unsigned long offset)
@@ -2202,7 +2202,7 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
page_idx + npages)
return -EINVAL;
- pfn = ((pci_resource_start(dev->mdev->pdev, 0) +
+ pfn = ((dev->mdev->bar_addr +
MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
PAGE_SHIFT) +
page_idx;
@@ -2285,7 +2285,7 @@ struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
goto err_free;
start_offset = memic_addr & ~PAGE_MASK;
- page_idx = (memic_addr - pci_resource_start(memic->dev->pdev, 0) -
+ page_idx = (memic_addr - memic->dev->bar_addr -
MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
PAGE_SHIFT;
@@ -2328,7 +2328,7 @@ int mlx5_ib_dealloc_dm(struct ib_dm *ibdm)
if (ret)
return ret;
- page_idx = (dm->dev_addr - pci_resource_start(memic->dev->pdev, 0) -
+ page_idx = (dm->dev_addr - memic->dev->bar_addr -
MLX5_CAP64_DEV_MEM(memic->dev, memic_bar_start_addr)) >>
PAGE_SHIFT;
bitmap_clear(to_mucontext(ibdm->uobject->context)->dm_pages,
@@ -4356,9 +4356,13 @@ static void delay_drop_handler(struct work_struct *work)
static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe,
struct ib_event *ibev)
{
+ u8 port = (eqe->data.port.port >> 4) & 0xf;
+
switch (eqe->sub_type) {
case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT:
- schedule_work(&ibdev->delay_drop.delay_drop_work);
+ if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
+ IB_LINK_LAYER_ETHERNET)
+ schedule_work(&ibdev->delay_drop.delay_drop_work);
break;
default: /* do nothing */
return;
@@ -5675,7 +5679,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
}
if (bound) {
- dev_dbg(&mpi->mdev->pdev->dev, "removing port from unaffiliated list.\n");
+ dev_dbg(mpi->mdev->device,
+ "removing port from unaffiliated list.\n");
mlx5_ib_dbg(dev, "port %d bound\n", i + 1);
list_del(&mpi->list);
break;
@@ -5874,7 +5879,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_count(mdev);
- dev->ib_dev.dev.parent = &mdev->pdev->dev;
+ dev->ib_dev.dev.parent = mdev->device;
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
@@ -6563,7 +6568,8 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
if (!bound) {
list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list);
- dev_dbg(&mdev->pdev->dev, "no suitable IB device found to bind to, added to unaffiliated list.\n");
+ dev_dbg(mdev->device,
+ "no suitable IB device found to bind to, added to unaffiliated list.\n");
}
mutex_unlock(&mlx5_ib_multiport_mutex);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index c85f00255884..ca921fd40499 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1194,8 +1194,7 @@ static struct ib_mr *mlx5_ib_get_memic_mr(struct ib_pd *pd, u64 memic_addr,
MLX5_SET64(mkc, mkc, len, length);
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET64(mkc, mkc, start_addr,
- memic_addr - pci_resource_start(dev->mdev->pdev, 0));
+ MLX5_SET64(mkc, mkc, start_addr, memic_addr - dev->mdev->bar_addr);
err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
if (err)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8aafb2208899..581144e224e2 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -5122,7 +5122,7 @@ out:
wmb();
/* currently we support only regular doorbells */
- mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset, NULL);
+ mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset);
/* Make sure doorbells don't leak out of SQ spinlock
* and reach the HCA out of order.
*/
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 032883180f65..0010a3ed64f1 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1407,7 +1407,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
" is %pM, Gateway is 0x%08X \n", dst_ip,
- neigh->ha, ntohl(rt->rt_gateway));
+ neigh->ha, ntohl(rt->rt_gw4));
if (arpindex >= 0) {
if (ether_addr_equal(nesadapter->arp_table[arpindex].mac_addr, neigh->ha)) {
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
index ae70cd18903e..aeff68f582d3 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
@@ -95,8 +95,7 @@ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
@@ -106,8 +105,7 @@ static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
- rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
- sb_dev, fallback);
+ rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
skb_pull(skb, sizeof(*mdata));
return rc;
}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index ecdeb89645d0..149b1aca52a2 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -958,6 +958,7 @@ static void write_iso_callback(struct urb *urb)
*/
static int starturbs(struct bc_state *bcs)
{
+ struct usb_device *udev = bcs->cs->hw.bas->udev;
struct bas_bc_state *ubc = bcs->hw.bas;
struct urb *urb;
int j, k;
@@ -975,8 +976,8 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
- usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel),
+ usb_fill_int_urb(urb, udev,
+ usb_rcvisocpipe(udev, 3 + 2 * bcs->channel),
ubc->isoinbuf + k * BAS_INBUFSIZE,
BAS_INBUFSIZE, read_iso_callback, bcs,
BAS_FRAMETIME);
@@ -1006,8 +1007,8 @@ static int starturbs(struct bc_state *bcs)
rc = -EFAULT;
goto error;
}
- usb_fill_int_urb(urb, bcs->cs->hw.bas->udev,
- usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel),
+ usb_fill_int_urb(urb, udev,
+ usb_sndisocpipe(udev, 4 + 2 * bcs->channel),
ubc->isooutbuf->data,
sizeof(ubc->isooutbuf->data),
write_iso_callback, &ubc->isoouturbs[k],
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index 362aa5450a5e..29c22d74afe0 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -2041,9 +2041,9 @@ setup_hw(struct hfc_pci *hc)
}
printk(KERN_INFO
- "HFC-PCI: defined at mem %#lx fifo %#lx(%#lx) IRQ %d HZ %d\n",
- (u_long) hc->hw.pci_io, (u_long) hc->hw.fifos,
- (u_long) hc->hw.dmahandle, hc->irq, HZ);
+ "HFC-PCI: defined at mem %#lx fifo %p(%pad) IRQ %d HZ %d\n",
+ (u_long) hc->hw.pci_io, hc->hw.fifos,
+ &hc->hw.dmahandle, hc->irq, HZ);
/* enable memory mapped ports, disable busmaster */
pci_write_config_word(hc->pdev, PCI_COMMAND, PCI_ENA_MEMIO);
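The hunk above prints the dma_addr_t with the %pad specifier instead of casting it to unsigned long. A minimal sketch (both parameters are hypothetical): %pad takes the DMA address by reference, so the format string stays correct for 32-bit and 64-bit dma_addr_t alike:

#include <linux/printk.h>
#include <linux/types.h>

static void example_report(void *fifos, dma_addr_t handle)
{
	/* %p prints the kernel pointer, %pad the DMA handle */
	pr_info("fifo %p(%pad)\n", fifos, &handle);
}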
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b12e6cae26c2..de965115a183 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1294,9 +1294,9 @@ void HiSax_reportcard(int cardnr, int sel)
printk(KERN_DEBUG "HiSax: reportcard No %d\n", cardnr + 1);
printk(KERN_DEBUG "HiSax: Type %s\n", CardType[cs->typ]);
printk(KERN_DEBUG "HiSax: debuglevel %x\n", cs->debug);
- printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%lX\n",
- (ulong) & HiSax_reportcard);
- printk(KERN_DEBUG "HiSax: cs 0x%lX\n", (ulong) cs);
+ printk(KERN_DEBUG "HiSax: HiSax_reportcard address 0x%px\n",
+ HiSax_reportcard);
+ printk(KERN_DEBUG "HiSax: cs 0x%px\n", cs);
printk(KERN_DEBUG "HiSax: HW_Flags %lx bc0 flg %lx bc1 flg %lx\n",
cs->HW_Flags, cs->bcs[0].Flag, cs->bcs[1].Flag);
printk(KERN_DEBUG "HiSax: bcs 0 mode %d ch%d\n",
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index a7b275ea5de1..7e0f419c14f8 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1888,8 +1888,9 @@ static u32 isdn_ppp_mp_get_seq(int short_seq,
return seq;
}
-struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
- struct sk_buff *from, struct sk_buff *to)
+static struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
+ struct sk_buff *from,
+ struct sk_buff *to)
{
if (from)
while (from != to) {
@@ -1900,8 +1901,8 @@ struct sk_buff *isdn_ppp_mp_discard(ippp_bundle *mp,
return from;
}
-void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
- struct sk_buff *from, struct sk_buff *to)
+static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
+ struct sk_buff *from, struct sk_buff *to)
{
ippp_bundle *mp = net_dev->pb;
int proto;
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 390a722e6211..ee657003c1a1 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -97,6 +97,12 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
+ case BPF_FUNC_map_push_elem:
+ return &bpf_map_push_elem_proto;
+ case BPF_FUNC_map_pop_elem:
+ return &bpf_map_pop_elem_proto;
+ case BPF_FUNC_map_peek_elem:
+ return &bpf_map_peek_elem_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_tail_call:
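The change above also exposes the queue/stack map helpers to lirc_mode2 programs. A hypothetical program sketch; the map definition style, section name and helper declarations are assumed from the BPF selftests conventions of this kernel generation, not taken from the patch:

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") samples = {
	.type		= BPF_MAP_TYPE_QUEUE,
	.key_size	= 0,	/* queue maps are keyless */
	.value_size	= sizeof(__u32),
	.max_entries	= 64,
};

SEC("lirc_mode2")
int push_sample(unsigned int *sample)
{
	__u32 s = *sample;

	/* bpf_map_push_elem() is one of the helpers enabled above */
	bpf_map_push_elem(&samples, &s, 0);
	return 0;
}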
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7a96d168efc4..bc42f131f47c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -505,6 +505,7 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 3d27616d9c85..51cf5eca9c7f 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -116,11 +116,15 @@ static struct net_device * __init ipddp_init(void)
*/
static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
{
- __be32 paddr = skb_rtable(skb)->rt_gateway;
+ struct rtable *rtable = skb_rtable(skb);
+ __be32 paddr = 0;
struct ddpehdr *ddp;
struct ipddp_route *rt;
struct atalk_addr *our_addr;
+ if (rtable->rt_gw_family == AF_INET)
+ paddr = rtable->rt_gw4;
+
spin_lock(&ipddp_route_lock);
/*
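The ipddp hunk above and the earlier nes_cm hunk follow the same pattern from this series: the route no longer carries a single generic gateway field, so callers check rt_gw_family before reading the family-specific member. A minimal sketch with a hypothetical helper name:

#include <net/route.h>

static __be32 example_ipv4_gateway(const struct rtable *rt)
{
	if (rt->rt_gw_family == AF_INET)
		return rt->rt_gw4;	/* IPv4 next hop */

	return 0;	/* no gateway, or an IPv6 (rt_gw6) next hop */
}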
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index ee610721098e..062fa7e3af4c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4118,8 +4118,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index b286f591242e..022044b59d6a 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -546,7 +546,7 @@ static int bond_fill_info(struct sk_buff *skb,
if (nla_put_u32(skb, IFLA_BOND_ARP_INTERVAL, bond->params.arp_interval))
goto nla_put_failure;
- targets = nla_nest_start(skb, IFLA_BOND_ARP_IP_TARGET);
+ targets = nla_nest_start_noflag(skb, IFLA_BOND_ARP_IP_TARGET);
if (!targets)
goto nla_put_failure;
@@ -644,7 +644,7 @@ static int bond_fill_info(struct sk_buff *skb,
if (!bond_3ad_get_active_agg_info(bond, &info)) {
struct nlattr *nest;
- nest = nla_nest_start(skb, IFLA_BOND_AD_INFO);
+ nest = nla_nest_start_noflag(skb, IFLA_BOND_AD_INFO);
if (!nest)
goto nla_put_failure;
@@ -711,7 +711,7 @@ static int bond_fill_linkxstats(struct sk_buff *skb,
return -EINVAL;
}
- nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BOND);
+ nest = nla_nest_start_noflag(skb, LINK_XSTATS_TYPE_BOND);
if (!nest)
return -EMSGSIZE;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -722,7 +722,7 @@ static int bond_fill_linkxstats(struct sk_buff *skb,
else
stats = &BOND_AD_INFO(bond).stats;
- nest2 = nla_nest_start(skb, BOND_XSTATS_3AD);
+ nest2 = nla_nest_start_noflag(skb, BOND_XSTATS_3AD);
if (!nest2) {
nla_nest_end(skb, nest);
return -EMSGSIZE;
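These conversions all use the pattern below; a minimal sketch with a hypothetical attribute type and inner attribute. The _noflag variant keeps the previous behaviour of not setting NLA_F_NESTED on the container, which preserves the wire format for existing dumpers:

#include <net/netlink.h>

static int example_fill_nest(struct sk_buff *skb, int attrtype, u32 value)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, attrtype);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, 1 /* hypothetical inner attribute */, value)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, nest);
	return 0;
}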
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 71bb3aebded4..c6c5ecdbcaef 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -41,7 +41,7 @@ config NET_DSA_MT7530
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
- depends on NET_DSA && NET_DSA_LEGACY
+ depends on NET_DSA
select NET_DSA_TAG_TRAILER
---help---
This enables support for the Marvell 88E6060 ethernet switch
@@ -51,6 +51,8 @@ source "drivers/net/dsa/microchip/Kconfig"
source "drivers/net/dsa/mv88e6xxx/Kconfig"
+source "drivers/net/dsa/sja1105/Kconfig"
+
config NET_DSA_QCA8K
tristate "Qualcomm Atheros QCA8K Ethernet switch family support"
depends on NET_DSA
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 82e5d794c41f..fefb6aaa82ba 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_NET_DSA_VITESSE_VSC73XX) += vitesse-vsc73xx.o
obj-y += b53/
obj-y += microchip/
obj-y += mv88e6xxx/
+obj-y += sja1105/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 0852e5e08177..c8040ecf4425 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -428,7 +428,6 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable,
b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
dev->vlan_enabled = enable;
- dev->vlan_filtering_enabled = enable_filtering;
}
static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
@@ -665,7 +664,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
b53_do_vlan_op(dev, VTA_CMD_CLEAR);
}
- b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
+ b53_enable_vlan(dev, false, ds->vlan_filtering);
b53_for_each_port(dev, i)
b53_write16(dev, B53_VLAN_PAGE,
@@ -966,6 +965,13 @@ static int b53_setup(struct dsa_switch *ds)
b53_disable_port(ds, port);
}
+ /* Let DSA handle the case where multiple bridges span the same switch
+ * device and different VLAN awareness settings are requested, which
+ * would break filtering semantics for any of the other bridge
+ * devices. (not supported by the hardware)
+ */
+ ds->vlan_filtering_is_global = true;
+
return ret;
}
@@ -1275,35 +1281,17 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
{
struct b53_device *dev = ds->priv;
- struct net_device *bridge_dev;
- unsigned int i;
u16 pvid, new_pvid;
- /* Handle the case were multiple bridges span the same switch device
- * and one of them has a different setting than what is being requested
- * which would be breaking filtering semantics for any of the other
- * bridge devices.
- */
- b53_for_each_port(dev, i) {
- bridge_dev = dsa_to_port(ds, i)->bridge_dev;
- if (bridge_dev &&
- bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
- br_vlan_enabled(bridge_dev) != vlan_filtering) {
- netdev_err(bridge_dev,
- "VLAN filtering is global to the switch!\n");
- return -EINVAL;
- }
- }
-
b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
new_pvid = pvid;
- if (dev->vlan_filtering_enabled && !vlan_filtering) {
+ if (!vlan_filtering) {
/* Filtering is currently enabled, use the default PVID since
* the bridge does not expect tagging anymore
*/
dev->ports[port].pvid = pvid;
new_pvid = b53_default_pvid(dev);
- } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
+ } else {
/* Filtering is currently disabled, restore the previous PVID */
new_pvid = dev->ports[port].pvid;
}
@@ -1329,7 +1317,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
if (vlan->vid_end > dev->num_vlans)
return -ERANGE;
- b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
+ b53_enable_vlan(dev, true, ds->vlan_filtering);
return 0;
}
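The b53 conversion above (and the mt7530 one further down) relies on two DSA core additions from this series: a driver can declare VLAN awareness to be a chip-wide setting, and it reads the per-port filtering state from the core instead of caching its own copy. A minimal sketch using only the two symbols visible in these hunks (function names hypothetical):

#include <net/dsa.h>

static int example_setup(struct dsa_switch *ds)
{
	/* VLAN filtering cannot be toggled per port on this hardware */
	ds->vlan_filtering_is_global = true;

	return 0;
}

static bool example_port_is_vlan_aware(struct dsa_switch *ds, int port)
{
	return dsa_port_is_vlan_filtering(&ds->ports[port]);
}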
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index e3441dcf2d21..f25bc80c4ffc 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -139,7 +139,6 @@ struct b53_device {
unsigned int num_vlans;
struct b53_vlan *vlans;
bool vlan_enabled;
- bool vlan_filtering_enabled;
unsigned int num_ports;
struct b53_port *ports;
};
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index c8e3f05e1d72..4ccb3239f5f7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1188,10 +1188,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (ret)
goto out_mdio;
- pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
- priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
- priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
- priv->core, priv->irq0, priv->irq1);
+ dev_info(&pdev->dev,
+ "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->irq0, priv->irq1);
return 0;
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index d8328866908c..553831df58fe 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -4,7 +4,25 @@
*
* Copyright (C) 2010 Lantiq Deutschland
* Copyright (C) 2012 John Crispin <john@phrozen.org>
- * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2017 - 2019 Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * The VLAN and bridge model the GSWIP hardware uses does not directly
+ * match the model DSA uses.
+ *
+ * The hardware has 64 possible table entries for bridges with one VLAN
+ * ID, one flow id and a list of ports for each bridge. All entries which
+ * match the same flow ID are combined in the mac learning table, they
+ * act as one global bridge.
+ * The hardware does not support VLAN filter on the port, but on the
+ * bridge, this driver converts the DSA model to the hardware.
+ *
+ * The CPU gets all the exception frames which do not match any forwarding
+ * rule and the CPU port is also added to all bridges. This makes it possible
+ * to handle all the special cases easily in software.
+ * At initialization the driver allocates one bridge table entry for
+ * each switch port, which is used when the port is not part of an
+ * explicit bridge. This prevents frames from being forwarded
+ * between all LAN ports by default.
*/
#include <linux/clk.h>
@@ -148,19 +166,29 @@
#define GSWIP_PCE_PMAP2 0x454 /* Default Multicast port map */
#define GSWIP_PCE_PMAP3 0x455 /* Default Unknown Unicast port map */
#define GSWIP_PCE_GCTRL_0 0x456
+#define GSWIP_PCE_GCTRL_0_MTFL BIT(0) /* MAC Table Flushing */
#define GSWIP_PCE_GCTRL_0_MC_VALID BIT(3)
#define GSWIP_PCE_GCTRL_0_VLAN BIT(14) /* VLAN aware Switching */
#define GSWIP_PCE_GCTRL_1 0x457
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK BIT(2) /* MAC Address table lock */
#define GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD BIT(3) /* Mac address table lock forwarding mode */
#define GSWIP_PCE_PCTRL_0p(p) (0x480 + ((p) * 0xA))
-#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11)
+#define GSWIP_PCE_PCTRL_0_TVM BIT(5) /* Transparent VLAN mode */
+#define GSWIP_PCE_PCTRL_0_VREP BIT(6) /* VLAN Replace Mode */
+#define GSWIP_PCE_PCTRL_0_INGRESS BIT(11) /* Accept special tag in ingress */
#define GSWIP_PCE_PCTRL_0_PSTATE_LISTEN 0x0
#define GSWIP_PCE_PCTRL_0_PSTATE_RX 0x1
#define GSWIP_PCE_PCTRL_0_PSTATE_TX 0x2
#define GSWIP_PCE_PCTRL_0_PSTATE_LEARNING 0x3
#define GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING 0x7
#define GSWIP_PCE_PCTRL_0_PSTATE_MASK GENMASK(2, 0)
+#define GSWIP_PCE_VCTRL(p) (0x485 + ((p) * 0xA))
+#define GSWIP_PCE_VCTRL_UVR BIT(0) /* Unknown VLAN Rule */
+#define GSWIP_PCE_VCTRL_VIMR BIT(3) /* VLAN Ingress Member violation rule */
+#define GSWIP_PCE_VCTRL_VEMR BIT(4) /* VLAN Egress Member violation rule */
+#define GSWIP_PCE_VCTRL_VSR BIT(5) /* VLAN Security */
+#define GSWIP_PCE_VCTRL_VID0 BIT(6) /* Priority Tagged Rule */
+#define GSWIP_PCE_DEFPVID(p) (0x486 + ((p) * 0xA))
#define GSWIP_MAC_FLEN 0x8C5
#define GSWIP_MAC_CTRL_2p(p) (0x905 + ((p) * 0xC))
@@ -183,6 +211,11 @@
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_TABLE_ACTIVE_VLAN 0x01
+#define GSWIP_TABLE_VLAN_MAPPING 0x02
+#define GSWIP_TABLE_MAC_BRIDGE 0x0b
+#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static, non-aging entry */
+
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
struct gswip_hw_info {
@@ -202,6 +235,12 @@ struct gswip_gphy_fw {
char *fw_name;
};
+struct gswip_vlan {
+ struct net_device *bridge;
+ u16 vid;
+ u8 fid;
+};
+
struct gswip_priv {
__iomem void *gswip;
__iomem void *mdio;
@@ -211,8 +250,22 @@ struct gswip_priv {
struct dsa_switch *ds;
struct device *dev;
struct regmap *rcu_regmap;
+ struct gswip_vlan vlans[64];
int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw;
+ u32 port_vlan_filter;
+};
+
+struct gswip_pce_table_entry {
+ u16 index; // PCE_TBL_ADDR.ADDR = pData->table_index
+ u16 table; // PCE_TBL_CTRL.ADDR = pData->table
+ u16 key[8];
+ u16 val[5];
+ u16 mask;
+ u8 gmap;
+ bool type;
+ bool valid;
+ bool key_mode;
};
struct gswip_rmon_cnt_desc {
@@ -447,10 +500,153 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
}
+static int gswip_pce_table_entry_read(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u16 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+
+ gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode | GSWIP_PCE_TBL_CTRL_BAS,
+ GSWIP_PCE_TBL_CTRL);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ tbl->val[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_VAL(i));
+
+ tbl->mask = gswip_switch_r(priv, GSWIP_PCE_TBL_MASK);
+
+ crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
+
+ tbl->type = !!(crtl & GSWIP_PCE_TBL_CTRL_TYPE);
+ tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
+ tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+
+ return 0;
+}
+
+static int gswip_pce_table_entry_write(struct gswip_priv *priv,
+ struct gswip_pce_table_entry *tbl)
+{
+ int i;
+ int err;
+ u16 crtl;
+ u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
+ GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+ if (err)
+ return err;
+
+ gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode,
+ GSWIP_PCE_TBL_CTRL);
+
+ for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
+ gswip_switch_w(priv, tbl->key[i], GSWIP_PCE_TBL_KEY(i));
+
+ for (i = 0; i < ARRAY_SIZE(tbl->val); i++)
+ gswip_switch_w(priv, tbl->val[i], GSWIP_PCE_TBL_VAL(i));
+
+ gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
+ GSWIP_PCE_TBL_CTRL_OPMOD_MASK,
+ tbl->table | addr_mode,
+ GSWIP_PCE_TBL_CTRL);
+
+ gswip_switch_w(priv, tbl->mask, GSWIP_PCE_TBL_MASK);
+
+ crtl = gswip_switch_r(priv, GSWIP_PCE_TBL_CTRL);
+ crtl &= ~(GSWIP_PCE_TBL_CTRL_TYPE | GSWIP_PCE_TBL_CTRL_VLD |
+ GSWIP_PCE_TBL_CTRL_GMAP_MASK);
+ if (tbl->type)
+ crtl |= GSWIP_PCE_TBL_CTRL_TYPE;
+ if (tbl->valid)
+ crtl |= GSWIP_PCE_TBL_CTRL_VLD;
+ crtl |= (tbl->gmap << 7) & GSWIP_PCE_TBL_CTRL_GMAP_MASK;
+ crtl |= GSWIP_PCE_TBL_CTRL_BAS;
+ gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
+
+ return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+}
+
+/* Add the LAN port into a bridge with the CPU port by
+ * default. This prevents automatic forwarding of
+ * packets between the LAN ports when no explicit
+ * bridge is configured.
+ */
+static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int err;
+
+ if (port >= max_ports) {
+ dev_err(priv->dev, "port %i out of range\n", port);
+ return -EIO;
+ }
+
+ vlan_active.index = port + 1;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = 0; /* vid */
+ vlan_active.val[0] = port + 1 /* fid */;
+ vlan_active.valid = add;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ if (!add)
+ return 0;
+
+ vlan_mapping.index = port + 1;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ vlan_mapping.val[0] = 0 /* vid */;
+ vlan_mapping.val[1] = BIT(port) | BIT(cpu_port);
+ vlan_mapping.val[2] = 0;
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
static int gswip_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phydev)
{
struct gswip_priv *priv = ds->priv;
+ int err;
+
+ if (!dsa_is_cpu_port(ds, port)) {
+ err = gswip_add_single_port_br(priv, port, true);
+ if (err)
+ return err;
+ }
/* RMON Counter Enable for port */
gswip_switch_w(priv, GSWIP_BM_PCFG_CNTEN, GSWIP_BM_PCFGp(port));
@@ -461,8 +657,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
- gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
- GSWIP_PCE_PCTRL_0p(port));
if (!dsa_is_cpu_port(ds, port)) {
u32 macconf = GSWIP_MDIO_PHY_LINK_AUTO |
@@ -535,6 +729,39 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
return 0;
}
+static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+
+ /* Do not allow changing the VLAN filtering options while in bridge */
+ if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
+ return -EIO;
+
+ if (vlan_filtering) {
+ /* Use port based VLAN tag */
+ gswip_switch_mask(priv,
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR,
+ GSWIP_PCE_VCTRL(port));
+ gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
+ GSWIP_PCE_PCTRL_0p(port));
+ } else {
+ /* Use port based VLAN tag */
+ gswip_switch_mask(priv,
+ GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
+ GSWIP_PCE_VCTRL_VEMR,
+ GSWIP_PCE_VCTRL_VSR,
+ GSWIP_PCE_VCTRL(port));
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_TVM,
+ GSWIP_PCE_PCTRL_0p(port));
+ }
+
+ return 0;
+}
+
static int gswip_setup(struct dsa_switch *ds)
{
struct gswip_priv *priv = ds->priv;
@@ -547,8 +774,10 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_w(priv, 0, GSWIP_SWRES);
/* disable port fetch/store dma on all ports */
- for (i = 0; i < priv->hw_info->max_ports; i++)
+ for (i = 0; i < priv->hw_info->max_ports; i++) {
gswip_port_disable(ds, i);
+ gswip_port_vlan_filtering(ds, i, false);
+ }
/* enable Switch */
gswip_mdio_mask(priv, 0, GSWIP_MDIO_GLOB_ENABLE, GSWIP_MDIO_GLOB);
@@ -578,6 +807,10 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
GSWIP_FDMA_PCTRLp(cpu_port));
+ /* accept special tag in ingress direction */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_PCTRL_0_INGRESS,
+ GSWIP_PCE_PCTRL_0p(cpu_port));
+
gswip_switch_mask(priv, 0, GSWIP_MAC_CTRL_2_MLEN,
GSWIP_MAC_CTRL_2p(cpu_port));
gswip_switch_w(priv, VLAN_ETH_FRAME_LEN + 8, GSWIP_MAC_FLEN);
@@ -587,10 +820,15 @@ static int gswip_setup(struct dsa_switch *ds)
/* VLAN aware Switching */
gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_VLAN, GSWIP_PCE_GCTRL_0);
- /* Mac Address Table Lock */
- gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_1_MAC_GLOCK |
- GSWIP_PCE_GCTRL_1_MAC_GLOCK_MOD,
- GSWIP_PCE_GCTRL_1);
+ /* Flush MAC Table */
+ gswip_switch_mask(priv, 0, GSWIP_PCE_GCTRL_0_MTFL, GSWIP_PCE_GCTRL_0);
+
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_GCTRL_0,
+ GSWIP_PCE_GCTRL_0_MTFL);
+ if (err) {
+ dev_err(priv->dev, "MAC flushing didn't finish\n");
+ return err;
+ }
gswip_port_enable(ds, cpu_port, NULL);
return 0;
@@ -602,6 +840,551 @@ static enum dsa_tag_protocol gswip_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_GSWIP;
}
+static int gswip_vlan_active_create(struct gswip_priv *priv,
+ struct net_device *bridge,
+ int fid, u16 vid)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ int idx = -1;
+ int err;
+ int i;
+
+ /* Look for a free slot */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (!priv->vlans[i].bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+
+ if (fid == -1)
+ fid = idx;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.key[0] = vid;
+ vlan_active.val[0] = fid;
+ vlan_active.valid = true;
+
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n", err);
+ return err;
+ }
+
+ priv->vlans[idx].bridge = bridge;
+ priv->vlans[idx].vid = vid;
+ priv->vlans[idx].fid = fid;
+
+ return idx;
+}
+
+static int gswip_vlan_active_remove(struct gswip_priv *priv, int idx)
+{
+ struct gswip_pce_table_entry vlan_active = {0,};
+ int err;
+
+ vlan_active.index = idx;
+ vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
+ vlan_active.valid = false;
+ err = gswip_pce_table_entry_write(priv, &vlan_active);
+ if (err)
+ dev_err(priv->dev, "failed to delete active VLAN: %d\n", err);
+ priv->vlans[idx].bridge = NULL;
+
+ return err;
+}
+
+static int gswip_vlan_add_unaware(struct gswip_priv *priv,
+ struct net_device *bridge, int port)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ bool active_vlan_created = false;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ idx = i;
+ break;
+ }
+ }
+
+ /* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, -1, 0);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = 0;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ /* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
+ return 0;
+}
+
+static int gswip_vlan_add_aware(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid, bool untagged,
+ bool pvid)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ bool active_vlan_created = false;
+ int idx = -1;
+ int fid = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ if (fid != -1 && fid != priv->vlans[i].fid)
+ dev_err(priv->dev, "one bridge with multiple flow ids\n");
+ fid = priv->vlans[i].fid;
+ if (priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+ }
+
+ /* If this bridge is not programmed yet, add an Active VLAN table
+ * entry in a free slot and prepare the VLAN mapping table entry.
+ */
+ if (idx == -1) {
+ idx = gswip_vlan_active_create(priv, bridge, fid, vid);
+ if (idx < 0)
+ return idx;
+ active_vlan_created = true;
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ /* VLAN ID byte, maps to the VLAN ID of vlan active table */
+ vlan_mapping.val[0] = vid;
+ } else {
+ /* Read the existing VLAN mapping entry from the switch */
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ vlan_mapping.val[0] = vid;
+ /* Update the VLAN mapping entry and write it to the switch */
+ vlan_mapping.val[1] |= BIT(cpu_port);
+ vlan_mapping.val[2] |= BIT(cpu_port);
+ vlan_mapping.val[1] |= BIT(port);
+ if (untagged)
+ vlan_mapping.val[2] &= ~BIT(port);
+ else
+ vlan_mapping.val[2] |= BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ /* In case an Active VLAN was created, delete it again */
+ if (active_vlan_created)
+ gswip_vlan_active_remove(priv, idx);
+ return err;
+ }
+
+ if (pvid)
+ gswip_switch_w(priv, idx, GSWIP_PCE_DEFPVID(port));
+
+ return 0;
+}
+
+static int gswip_vlan_remove(struct gswip_priv *priv,
+ struct net_device *bridge, int port,
+ u16 vid, bool pvid, bool vlan_aware)
+{
+ struct gswip_pce_table_entry vlan_mapping = {0,};
+ unsigned int max_ports = priv->hw_info->max_ports;
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int idx = -1;
+ int i;
+ int err;
+
+ /* Check if there is already a page for this bridge */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ (!vlan_aware || priv->vlans[i].vid == vid)) {
+ idx = i;
+ break;
+ }
+ }
+
+ if (idx == -1) {
+ dev_err(priv->dev, "bridge to leave does not exist\n");
+ return -ENOENT;
+ }
+
+ vlan_mapping.index = idx;
+ vlan_mapping.table = GSWIP_TABLE_VLAN_MAPPING;
+ err = gswip_pce_table_entry_read(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to read VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ vlan_mapping.val[1] &= ~BIT(port);
+ vlan_mapping.val[2] &= ~BIT(port);
+ err = gswip_pce_table_entry_write(priv, &vlan_mapping);
+ if (err) {
+ dev_err(priv->dev, "failed to write VLAN mapping: %d\n", err);
+ return err;
+ }
+
+ /* In case all ports are removed from the bridge, remove the VLAN */
+ if ((vlan_mapping.val[1] & ~BIT(cpu_port)) == 0) {
+ err = gswip_vlan_active_remove(priv, idx);
+ if (err) {
+ dev_err(priv->dev, "failed to write active VLAN: %d\n",
+ err);
+ return err;
+ }
+ }
+
+ /* GSWIP 2.2 (GRX300) and later program the VID directly here. */
+ if (pvid)
+ gswip_switch_w(priv, 0, GSWIP_PCE_DEFPVID(port));
+
+ return 0;
+}
+
+static int gswip_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct gswip_priv *priv = ds->priv;
+ int err;
+
+ /* When the bridge uses VLAN filtering we have to configure VLAN
+ * specific bridges. No bridge is configured here.
+ */
+ if (!br_vlan_enabled(bridge)) {
+ err = gswip_vlan_add_unaware(priv, bridge, port);
+ if (err)
+ return err;
+ priv->port_vlan_filter &= ~BIT(port);
+ } else {
+ priv->port_vlan_filter |= BIT(port);
+ }
+ return gswip_add_single_port_br(priv, port, false);
+}
+
+static void gswip_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct gswip_priv *priv = ds->priv;
+
+ gswip_add_single_port_br(priv, port, true);
+
+ /* When the bridge uses VLAN filtering we have to configure VLAN
+ * specific bridges. No bridge is configured here.
+ */
+ if (!br_vlan_enabled(bridge))
+ gswip_vlan_remove(priv, bridge, port, 0, true, false);
+}
+
+static int gswip_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ unsigned int max_ports = priv->hw_info->max_ports;
+ u16 vid;
+ int i;
+ int pos = max_ports;
+
+ /* We only support VLAN filtering on bridges */
+ if (!dsa_is_cpu_port(ds, port) && !bridge)
+ return -EOPNOTSUPP;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ int idx = -1;
+
+ /* Check if there is already a page for this VLAN */
+ for (i = max_ports; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge &&
+ priv->vlans[i].vid == vid) {
+ idx = i;
+ break;
+ }
+ }
+
+ /* If this VLAN is not programmed yet, we have to reserve
+ * one entry in the VLAN table. Make sure the next round
+ * starts at the following position.
+ */
+ if (idx == -1) {
+ /* Look for a free slot */
+ for (; pos < ARRAY_SIZE(priv->vlans); pos++) {
+ if (!priv->vlans[pos].bridge) {
+ idx = pos;
+ pos++;
+ break;
+ }
+ }
+
+ if (idx == -1)
+ return -ENOSPC;
+ }
+ }
+
+ return 0;
+}
+
+static void gswip_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+
+ /* We have to receive all packets on the CPU port and should not
+ * do any VLAN filtering here. This is also called with bridge
+ * NULL and then we do not know for which bridge to configure
+ * this.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ gswip_vlan_add_aware(priv, bridge, port, vid, untagged, pvid);
+}
+
+static int gswip_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ u16 vid;
+ int err;
+
+ /* We have to receive all packets on the CPU port and should not
+ * do any VLAN filtering here. This is also called with bridge
+ * NULL and then we do not know for which bridge to configure
+ * this.
+ */
+ if (dsa_is_cpu_port(ds, port))
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
+ err = gswip_vlan_remove(priv, bridge, port, vid, pvid, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void gswip_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
+ continue;
+
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
+ continue;
+
+ mac_bridge.valid = false;
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to write mac bridge: %d\n",
+ err);
+ return;
+ }
+ }
+}
+
+static void gswip_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct gswip_priv *priv = ds->priv;
+ u32 stp_state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
+ GSWIP_SDMA_PCTRLp(port));
+ return;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LISTEN;
+ break;
+ case BR_STATE_LEARNING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ stp_state = GSWIP_PCE_PCTRL_0_PSTATE_FORWARDING;
+ break;
+ default:
+ dev_err(priv->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
+ GSWIP_SDMA_PCTRLp(port));
+ gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_PSTATE_MASK, stp_state,
+ GSWIP_PCE_PCTRL_0p(port));
+}
+
+static int gswip_port_fdb(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid, bool add)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned int cpu_port = priv->hw_info->cpu_port;
+ int fid = -1;
+ int i;
+ int err;
+
+ if (!bridge)
+ return -EINVAL;
+
+ for (i = cpu_port; i < ARRAY_SIZE(priv->vlans); i++) {
+ if (priv->vlans[i].bridge == bridge) {
+ fid = priv->vlans[i].fid;
+ break;
+ }
+ }
+
+ if (fid == -1) {
+ dev_err(priv->dev, "Port not part of a bridge\n");
+ return -EINVAL;
+ }
+
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.key_mode = true;
+ mac_bridge.key[0] = addr[5] | (addr[4] << 8);
+ mac_bridge.key[1] = addr[3] | (addr[2] << 8);
+ mac_bridge.key[2] = addr[1] | (addr[0] << 8);
+ mac_bridge.key[3] = fid;
+ mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
+ mac_bridge.valid = add;
+
+ err = gswip_pce_table_entry_write(priv, &mac_bridge);
+ if (err)
+ dev_err(priv->dev, "failed to write mac bridge: %d\n", err);
+
+ return err;
+}
+
+static int gswip_port_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ return gswip_port_fdb(ds, port, addr, vid, true);
+}
+
+static int gswip_port_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ return gswip_port_fdb(ds, port, addr, vid, false);
+}
+
+static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct gswip_priv *priv = ds->priv;
+ struct gswip_pce_table_entry mac_bridge = {0,};
+ unsigned char addr[6];
+ int i;
+ int err;
+
+ for (i = 0; i < 2048; i++) {
+ mac_bridge.table = GSWIP_TABLE_MAC_BRIDGE;
+ mac_bridge.index = i;
+
+ err = gswip_pce_table_entry_read(priv, &mac_bridge);
+ if (err) {
+ dev_err(priv->dev, "failed to read mac bridge: %d\n",
+ err);
+ return err;
+ }
+
+ if (!mac_bridge.valid)
+ continue;
+
+ addr[5] = mac_bridge.key[0] & 0xff;
+ addr[4] = (mac_bridge.key[0] >> 8) & 0xff;
+ addr[3] = mac_bridge.key[1] & 0xff;
+ addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
+ addr[1] = mac_bridge.key[2] & 0xff;
+ addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
+ if (mac_bridge.val[0] & BIT(port))
+ cb(addr, 0, true, data);
+ } else {
+ if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port)
+ cb(addr, 0, false, data);
+ }
+ }
+ return 0;
+}
+
static void gswip_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -809,6 +1592,17 @@ static const struct dsa_switch_ops gswip_switch_ops = {
.setup = gswip_setup,
.port_enable = gswip_port_enable,
.port_disable = gswip_port_disable,
+ .port_bridge_join = gswip_port_bridge_join,
+ .port_bridge_leave = gswip_port_bridge_leave,
+ .port_fast_age = gswip_port_fast_age,
+ .port_vlan_filtering = gswip_port_vlan_filtering,
+ .port_vlan_prepare = gswip_port_vlan_prepare,
+ .port_vlan_add = gswip_port_vlan_add,
+ .port_vlan_del = gswip_port_vlan_del,
+ .port_stp_state_set = gswip_port_stp_state_set,
+ .port_fdb_add = gswip_port_fdb_add,
+ .port_fdb_del = gswip_port_fdb_del,
+ .port_fdb_dump = gswip_port_fdb_dump,
.phylink_validate = gswip_phylink_validate,
.phylink_mac_config = gswip_phylink_mac_config,
.phylink_mac_link_down = gswip_phylink_mac_link_down,
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f16e1d7d8615..c026d15721f6 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1144,6 +1144,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
interface = PHY_INTERFACE_MODE_GMII;
if (gbit)
break;
+ /* fall through */
case 0:
interface = PHY_INTERFACE_MODE_MII;
break;
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 7357b4fc0185..8d531c5f21f3 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -828,11 +828,9 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
VLAN_ATTR(MT7530_VLAN_TRANSPARENT));
- priv->ports[port].vlan_filtering = false;
-
for (i = 0; i < MT7530_NUM_PORTS; i++) {
if (dsa_is_user_port(ds, i) &&
- priv->ports[i].vlan_filtering) {
+ dsa_port_is_vlan_filtering(&ds->ports[i])) {
all_user_ports_removed = false;
break;
}
@@ -891,8 +889,8 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
* And the other port's port matrix cannot be broken when the
* other port is still a VLAN-aware port.
*/
- if (!priv->ports[i].vlan_filtering &&
- dsa_is_user_port(ds, i) && i != port) {
+ if (dsa_is_user_port(ds, i) && i != port &&
+ !dsa_port_is_vlan_filtering(&ds->ports[i])) {
if (dsa_to_port(ds, i)->bridge_dev != bridge)
continue;
if (priv->ports[i].enable)
@@ -910,8 +908,6 @@ mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
PCR_MATRIX(BIT(MT7530_CPU_PORT)));
priv->ports[port].pm = PCR_MATRIX(BIT(MT7530_CPU_PORT));
- mt7530_port_set_vlan_unaware(ds, port);
-
mutex_unlock(&priv->reg_mutex);
}
@@ -1013,10 +1009,6 @@ static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
bool vlan_filtering)
{
- struct mt7530_priv *priv = ds->priv;
-
- priv->ports[port].vlan_filtering = vlan_filtering;
-
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1025,6 +1017,8 @@ mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
*/
mt7530_port_set_vlan_aware(ds, port);
mt7530_port_set_vlan_aware(ds, MT7530_CPU_PORT);
+ } else {
+ mt7530_port_set_vlan_unaware(ds, port);
}
return 0;
@@ -1139,7 +1133,7 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
- if (!priv->ports[port].vlan_filtering)
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
return;
mutex_lock(&priv->reg_mutex);
@@ -1170,7 +1164,7 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
/* The port is kept as VLAN-unaware if bridge with vlan_filtering not
* being set.
*/
- if (!priv->ports[port].vlan_filtering)
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
return 0;
mutex_lock(&priv->reg_mutex);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a95ed958df5b..1eec7bdc283a 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -410,7 +410,6 @@ struct mt7530_port {
bool enable;
u32 pm;
u16 pvid;
- bool vlan_filtering;
};
/* struct mt7530_priv - This is the main data structure for holding the state
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0b3e51f248c2..2a2489b5196d 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -1,11 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
* Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include <linux/delay.h>
@@ -18,40 +14,16 @@
#include <net/dsa.h>
#include "mv88e6060.h"
-static int reg_read(struct dsa_switch *ds, int addr, int reg)
+static int reg_read(struct mv88e6060_priv *priv, int addr, int reg)
{
- struct mv88e6060_priv *priv = ds->priv;
-
return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
}
-#define REG_READ(addr, reg) \
- ({ \
- int __ret; \
- \
- __ret = reg_read(ds, addr, reg); \
- if (__ret < 0) \
- return __ret; \
- __ret; \
- })
-
-
-static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+static int reg_write(struct mv88e6060_priv *priv, int addr, int reg, u16 val)
{
- struct mv88e6060_priv *priv = ds->priv;
-
return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
}
-#define REG_WRITE(addr, reg, val) \
- ({ \
- int __ret; \
- \
- __ret = reg_write(ds, addr, reg, val); \
- if (__ret < 0) \
- return __ret; \
- })
-
static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
{
int ret;
@@ -76,28 +48,7 @@ static enum dsa_tag_protocol mv88e6060_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_TRAILER;
}
-static const char *mv88e6060_drv_probe(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **_priv)
-{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
- struct mv88e6060_priv *priv;
- const char *name;
-
- name = mv88e6060_get_name(bus, sw_addr);
- if (name) {
- priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return NULL;
- *_priv = priv;
- priv->bus = bus;
- priv->sw_addr = sw_addr;
- }
-
- return name;
-}
-
-static int mv88e6060_switch_reset(struct dsa_switch *ds)
+static int mv88e6060_switch_reset(struct mv88e6060_priv *priv)
{
int i;
int ret;
@@ -105,23 +56,32 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
/* Set all ports to the disabled state. */
for (i = 0; i < MV88E6060_PORTS; i++) {
- ret = REG_READ(REG_PORT(i), PORT_CONTROL);
- REG_WRITE(REG_PORT(i), PORT_CONTROL,
- ret & ~PORT_CONTROL_STATE_MASK);
+ ret = reg_read(priv, REG_PORT(i), PORT_CONTROL);
+ if (ret < 0)
+ return ret;
+ ret = reg_write(priv, REG_PORT(i), PORT_CONTROL,
+ ret & ~PORT_CONTROL_STATE_MASK);
+ if (ret)
+ return ret;
}
/* Wait for transmit queues to drain. */
usleep_range(2000, 4000);
/* Reset the switch. */
- REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_SWRESET |
- GLOBAL_ATU_CONTROL_LEARNDIS);
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_SWRESET |
+ GLOBAL_ATU_CONTROL_LEARNDIS);
+ if (ret)
+ return ret;
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ ret = reg_read(priv, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
if (ret & GLOBAL_STATUS_INIT_READY)
break;
@@ -133,61 +93,69 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
return 0;
}
-static int mv88e6060_setup_global(struct dsa_switch *ds)
+static int mv88e6060_setup_global(struct mv88e6060_priv *priv)
{
+ int ret;
+
/* Disable discarding of frames with excessive collisions,
* set the maximum frame size to 1536 bytes, and mask all
* interrupt sources.
*/
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_CONTROL,
+ GLOBAL_CONTROL_MAX_FRAME_1536);
+ if (ret)
+ return ret;
/* Disable automatic address learning.
*/
- REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_LEARNDIS);
-
- return 0;
+ return reg_write(priv, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ GLOBAL_ATU_CONTROL_LEARNDIS);
}
-static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
+static int mv88e6060_setup_port(struct mv88e6060_priv *priv, int p)
{
int addr = REG_PORT(p);
+ int ret;
/* Do not force flow control, disable Ingress and Egress
* Header tagging, disable VLAN tunneling, and set the port
* state to Forwarding. Additionally, if this is the CPU
* port, enable Ingress and Egress Trailer tagging mode.
*/
- REG_WRITE(addr, PORT_CONTROL,
- dsa_is_cpu_port(ds, p) ?
+ ret = reg_write(priv, addr, PORT_CONTROL,
+ dsa_is_cpu_port(priv->ds, p) ?
PORT_CONTROL_TRAILER |
PORT_CONTROL_INGRESS_MODE |
PORT_CONTROL_STATE_FORWARDING :
PORT_CONTROL_STATE_FORWARDING);
+ if (ret)
+ return ret;
/* Port based VLAN map: give each port its own address
* database, allow the CPU port to talk to each of the 'real'
* ports, and allow each of the 'real' ports to only talk to
* the CPU port.
*/
- REG_WRITE(addr, PORT_VLAN_MAP,
- ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
- (dsa_is_cpu_port(ds, p) ? dsa_user_ports(ds) :
- BIT(dsa_to_port(ds, p)->cpu_dp->index)));
+ ret = reg_write(priv, addr, PORT_VLAN_MAP,
+ ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
+ (dsa_is_cpu_port(priv->ds, p) ?
+ dsa_user_ports(priv->ds) :
+ BIT(dsa_to_port(priv->ds, p)->cpu_dp->index)));
+ if (ret)
+ return ret;
/* Port Association Vector: when learning source addresses
* of packets, add the address to the address database using
* a port bitmap that has only the bit for this port set and
* the other bits clear.
*/
- REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
-
- return 0;
+ return reg_write(priv, addr, PORT_ASSOC_VECTOR, BIT(p));
}
-static int mv88e6060_setup_addr(struct dsa_switch *ds)
+static int mv88e6060_setup_addr(struct mv88e6060_priv *priv)
{
u8 addr[ETH_ALEN];
+ int ret;
u16 val;
eth_random_addr(addr);
@@ -199,34 +167,43 @@ static int mv88e6060_setup_addr(struct dsa_switch *ds)
*/
val &= 0xfeff;
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_01, val);
+ if (ret)
+ return ret;
+
+ ret = reg_write(priv, REG_GLOBAL, GLOBAL_MAC_23,
+ (addr[2] << 8) | addr[3]);
+ if (ret)
+ return ret;
- return 0;
+ return reg_write(priv, REG_GLOBAL, GLOBAL_MAC_45,
+ (addr[4] << 8) | addr[5]);
}
static int mv88e6060_setup(struct dsa_switch *ds)
{
+ struct mv88e6060_priv *priv = ds->priv;
int ret;
int i;
- ret = mv88e6060_switch_reset(ds);
+ priv->ds = ds;
+
+ ret = mv88e6060_switch_reset(priv);
if (ret < 0)
return ret;
/* @@@ initialise atu */
- ret = mv88e6060_setup_global(ds);
+ ret = mv88e6060_setup_global(priv);
if (ret < 0)
return ret;
- ret = mv88e6060_setup_addr(ds);
+ ret = mv88e6060_setup_addr(priv);
if (ret < 0)
return ret;
for (i = 0; i < MV88E6060_PORTS; i++) {
- ret = mv88e6060_setup_port(ds, i);
+ ret = mv88e6060_setup_port(priv, i);
if (ret < 0)
return ret;
}
@@ -243,51 +220,93 @@ static int mv88e6060_port_to_phy_addr(int port)
static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
{
+ struct mv88e6060_priv *priv = ds->priv;
int addr;
addr = mv88e6060_port_to_phy_addr(port);
if (addr == -1)
return 0xffff;
- return reg_read(ds, addr, regnum);
+ return reg_read(priv, addr, regnum);
}
static int
mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
{
+ struct mv88e6060_priv *priv = ds->priv;
int addr;
addr = mv88e6060_port_to_phy_addr(port);
if (addr == -1)
return 0xffff;
- return reg_write(ds, addr, regnum, val);
+ return reg_write(priv, addr, regnum, val);
}
static const struct dsa_switch_ops mv88e6060_switch_ops = {
.get_tag_protocol = mv88e6060_get_tag_protocol,
- .probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
.phy_read = mv88e6060_phy_read,
.phy_write = mv88e6060_phy_write,
};
-static struct dsa_switch_driver mv88e6060_switch_drv = {
- .ops = &mv88e6060_switch_ops,
-};
-
-static int __init mv88e6060_init(void)
+static int mv88e6060_probe(struct mdio_device *mdiodev)
{
- register_switch_driver(&mv88e6060_switch_drv);
- return 0;
+ struct device *dev = &mdiodev->dev;
+ struct mv88e6060_priv *priv;
+ struct dsa_switch *ds;
+ const char *name;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->bus = mdiodev->bus;
+ priv->sw_addr = mdiodev->addr;
+
+ name = mv88e6060_get_name(priv->bus, priv->sw_addr);
+ if (!name)
+ return -ENODEV;
+
+ dev_info(dev, "switch %s detected\n", name);
+
+ ds = dsa_switch_alloc(dev, MV88E6060_PORTS);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->priv = priv;
+ ds->dev = dev;
+ ds->ops = &mv88e6060_switch_ops;
+
+ dev_set_drvdata(dev, ds);
+
+ return dsa_register_switch(ds);
}
-module_init(mv88e6060_init);
-static void __exit mv88e6060_cleanup(void)
+static void mv88e6060_remove(struct mdio_device *mdiodev)
{
- unregister_switch_driver(&mv88e6060_switch_drv);
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ dsa_unregister_switch(ds);
}
-module_exit(mv88e6060_cleanup);
+
+static const struct of_device_id mv88e6060_of_match[] = {
+ {
+ .compatible = "marvell,mv88e6060",
+ },
+ { /* sentinel */ },
+};
+
+static struct mdio_driver mv88e6060_driver = {
+ .probe = mv88e6060_probe,
+ .remove = mv88e6060_remove,
+ .mdiodrv.driver = {
+ .name = "mv88e6060",
+ .of_match_table = mv88e6060_of_match,
+ },
+};
+
+mdio_module_driver(mv88e6060_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
index 10249bd16292..c0e7a0f2fb6a 100644
--- a/drivers/net/dsa/mv88e6060.h
+++ b/drivers/net/dsa/mv88e6060.h
@@ -117,6 +117,7 @@ struct mv88e6060_priv {
*/
struct mii_bus *bus;
int sw_addr;
+ struct dsa_switch *ds;
};
#endif
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 50de304abe2f..e85755dde90b 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -12,3 +12,4 @@ mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
+mv88e6xxx-objs += smi.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f4e2db44ad91..28414db979b0 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -43,6 +43,7 @@
#include "port.h"
#include "ptp.h"
#include "serdes.h"
+#include "smi.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
{
@@ -52,149 +53,6 @@ static void assert_reg_lock(struct mv88e6xxx_chip *chip)
}
}
-/* The switch ADDR[4:1] configuration pins define the chip SMI device address
- * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
- *
- * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
- * is the only device connected to the SMI master. In this mode it responds to
- * all 32 possible SMI addresses, and thus maps directly the internal devices.
- *
- * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
- * multiple devices to share the SMI interface. In this mode it responds to only
- * 2 registers, used to indirectly access the internal SMI devices.
- */
-
-static int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
-{
- if (!chip->smi_ops)
- return -EOPNOTSUPP;
-
- return chip->smi_ops->read(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
-{
- if (!chip->smi_ops)
- return -EOPNOTSUPP;
-
- return chip->smi_ops->write(chip, addr, reg, val);
-}
-
-static int mv88e6xxx_smi_single_chip_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
-{
- int ret;
-
- ret = mdiobus_read_nested(chip->bus, addr, reg);
- if (ret < 0)
- return ret;
-
- *val = ret & 0xffff;
-
- return 0;
-}
-
-static int mv88e6xxx_smi_single_chip_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
-{
- int ret;
-
- ret = mdiobus_write_nested(chip->bus, addr, reg, val);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_single_chip_ops = {
- .read = mv88e6xxx_smi_single_chip_read,
- .write = mv88e6xxx_smi_single_chip_write,
-};
-
-static int mv88e6xxx_smi_multi_chip_wait(struct mv88e6xxx_chip *chip)
-{
- int ret;
- int i;
-
- for (i = 0; i < 16; i++) {
- ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_CMD);
- if (ret < 0)
- return ret;
-
- if ((ret & SMI_CMD_BUSY) == 0)
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
-static int mv88e6xxx_smi_multi_chip_read(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 *val)
-{
- int ret;
-
- /* Wait for the bus to become free. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- /* Transmit the read command. */
- ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
- SMI_CMD_OP_22_READ | (addr << 5) | reg);
- if (ret < 0)
- return ret;
-
- /* Wait for the read command to complete. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- /* Read the data. */
- ret = mdiobus_read_nested(chip->bus, chip->sw_addr, SMI_DATA);
- if (ret < 0)
- return ret;
-
- *val = ret & 0xffff;
-
- return 0;
-}
-
-static int mv88e6xxx_smi_multi_chip_write(struct mv88e6xxx_chip *chip,
- int addr, int reg, u16 val)
-{
- int ret;
-
- /* Wait for the bus to become free. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- /* Transmit the data to write. */
- ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_DATA, val);
- if (ret < 0)
- return ret;
-
- /* Transmit the write command. */
- ret = mdiobus_write_nested(chip->bus, chip->sw_addr, SMI_CMD,
- SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
- if (ret < 0)
- return ret;
-
- /* Wait for the write command to complete. */
- ret = mv88e6xxx_smi_multi_chip_wait(chip);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
-static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_multi_chip_ops = {
- .read = mv88e6xxx_smi_multi_chip_read,
- .write = mv88e6xxx_smi_multi_chip_write,
-};
-
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val)
{
int err;
@@ -553,11 +411,28 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
int speed, int duplex, int pause,
phy_interface_t mode)
{
+ struct phylink_link_state state;
int err;
if (!chip->info->ops->port_set_link)
return 0;
+ if (!chip->info->ops->port_link_state)
+ return 0;
+
+ err = chip->info->ops->port_link_state(chip, port, &state);
+ if (err)
+ return err;
+
+ /* Has anything actually changed? We don't expect the
+ * interface mode to change without one of the other
+ * parameters also changing
+ */
+ if (state.link == link &&
+ state.speed == speed &&
+ state.duplex == duplex)
+ return 0;
+
/* Port's MAC control must not be changed unless the link is down */
err = chip->info->ops->port_set_link(chip, port, 0);
if (err)
@@ -2411,6 +2286,9 @@ static void mv88e6xxx_port_disable(struct dsa_switch *ds, int port)
mutex_lock(&chip->reg_lock);
+ if (mv88e6xxx_port_set_state(chip, port, BR_STATE_DISABLED))
+ dev_err(chip->dev, "failed to disable port\n");
+
if (chip->info->ops->serdes_irq_free)
chip->info->ops->serdes_irq_free(chip, port);
@@ -2579,8 +2457,18 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
/* Setup Switch Port Registers */
for (i = 0; i < mv88e6xxx_num_ports(chip); i++) {
- if (dsa_is_unused_port(ds, i))
+ if (dsa_is_unused_port(ds, i)) {
+ err = mv88e6xxx_port_set_state(chip, i,
+ BR_STATE_DISABLED);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_serdes_power(chip, i, false);
+ if (err)
+ goto unlock;
+
continue;
+ }
err = mv88e6xxx_setup_port(chip, i);
if (err)
@@ -4615,30 +4503,6 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
return chip;
}
-static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus, int sw_addr)
-{
- if (sw_addr == 0)
- chip->smi_ops = &mv88e6xxx_smi_single_chip_ops;
- else if (chip->info->multi_chip)
- chip->smi_ops = &mv88e6xxx_smi_multi_chip_ops;
- else
- return -EINVAL;
-
- chip->bus = bus;
- chip->sw_addr = sw_addr;
-
- return 0;
-}
-
-static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
-{
- int i;
-
- for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
- chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
-}
-
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4647,58 +4511,6 @@ static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
return chip->info->tag_protocol;
}
-#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
-static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **priv)
-{
- struct mv88e6xxx_chip *chip;
- struct mii_bus *bus;
- int err;
-
- bus = dsa_host_dev_to_mii_bus(host_dev);
- if (!bus)
- return NULL;
-
- chip = mv88e6xxx_alloc_chip(dsa_dev);
- if (!chip)
- return NULL;
-
- /* Legacy SMI probing will only support chips similar to 88E6085 */
- chip->info = &mv88e6xxx_table[MV88E6085];
-
- err = mv88e6xxx_smi_init(chip, bus, sw_addr);
- if (err)
- goto free;
-
- err = mv88e6xxx_detect(chip);
- if (err)
- goto free;
-
- mv88e6xxx_ports_cmode_init(chip);
-
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_switch_reset(chip);
- mutex_unlock(&chip->reg_lock);
- if (err)
- goto free;
-
- mv88e6xxx_phy_init(chip);
-
- err = mv88e6xxx_mdios_register(chip, NULL);
- if (err)
- goto free;
-
- *priv = chip;
-
- return chip->info->name;
-free:
- devm_kfree(dsa_dev, chip);
-
- return NULL;
-}
-#endif
-
static int mv88e6xxx_port_mdb_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
@@ -4753,9 +4565,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
}
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
-#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
- .probe = mv88e6xxx_drv_probe,
-#endif
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
.adjust_link = mv88e6xxx_adjust_link,
@@ -4801,10 +4610,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_ts_info = mv88e6xxx_get_ts_info,
};
-static struct dsa_switch_driver mv88e6xxx_switch_drv = {
- .ops = &mv88e6xxx_switch_ops,
-};
-
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
{
struct device *dev = chip->dev;
@@ -4915,7 +4720,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
- mv88e6xxx_ports_cmode_init(chip);
mv88e6xxx_phy_init(chip);
if (chip->info->ops->get_eeprom) {
@@ -4932,12 +4736,17 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
- chip->irq = of_irq_get(np, 0);
- if (chip->irq == -EPROBE_DEFER) {
- err = chip->irq;
- goto out;
+ if (np) {
+ chip->irq = of_irq_get(np, 0);
+ if (chip->irq == -EPROBE_DEFER) {
+ err = chip->irq;
+ goto out;
+ }
}
+ if (pdata)
+ chip->irq = pdata->irq;
+
/* Has to be performed before the MDIO bus is created, because
* the PHYs will link their interrupts to these interrupt
* controllers
@@ -5047,19 +4856,7 @@ static struct mdio_driver mv88e6xxx_driver = {
},
};
-static int __init mv88e6xxx_init(void)
-{
- register_switch_driver(&mv88e6xxx_switch_drv);
- return mdio_driver_register(&mv88e6xxx_driver);
-}
-module_init(mv88e6xxx_init);
-
-static void __exit mv88e6xxx_cleanup(void)
-{
- mdio_driver_unregister(&mv88e6xxx_driver);
- unregister_switch_driver(&mv88e6xxx_switch_drv);
-}
-module_exit(mv88e6xxx_cleanup);
+mdio_module_driver(mv88e6xxx_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 19c07dff0440..faa3fa889f19 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -21,17 +21,6 @@
#include <linux/timecounter.h>
#include <net/dsa.h>
-#define SMI_CMD 0x00
-#define SMI_CMD_BUSY BIT(15)
-#define SMI_CMD_CLAUSE_22 BIT(12)
-#define SMI_CMD_OP_22_WRITE ((1 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
-#define SMI_CMD_OP_22_READ ((2 << 10) | SMI_CMD_BUSY | SMI_CMD_CLAUSE_22)
-#define SMI_CMD_OP_45_WRITE_ADDR ((0 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_WRITE_DATA ((1 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_READ_DATA ((2 << 10) | SMI_CMD_BUSY)
-#define SMI_CMD_OP_45_READ_DATA_INC ((3 << 10) | SMI_CMD_BUSY)
-#define SMI_DATA 0x01
-
#define MV88E6XXX_N_FID 4096
/* PVT limits for 4-bit port and 5-bit switch */
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index c7bed263a0f4..39c85e98fb92 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,7 +52,6 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
-#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
diff --git a/drivers/net/dsa/mv88e6xxx/smi.c b/drivers/net/dsa/mv88e6xxx/smi.c
new file mode 100644
index 000000000000..96f7d2685bdc
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/smi.c
@@ -0,0 +1,158 @@
+/*
+ * Marvell 88E6xxx System Management Interface (SMI) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "smi.h"
+
+/* The switch ADDR[4:1] configuration pins define the chip SMI device address
+ * (ADDR[0] is always zero, thus only even SMI addresses can be strapped).
+ *
+ * When ADDR is all zero, the chip uses Single-chip Addressing Mode, assuming it
+ * is the only device connected to the SMI master. In this mode it responds to
+ * all 32 possible SMI addresses, and thus maps directly the internal devices.
+ *
+ * When ADDR is non-zero, the chip uses Multi-chip Addressing Mode, allowing
+ * multiple devices to share the SMI interface. In this mode it responds to only
+ * 2 registers, used to indirectly access the internal SMI devices.
+ */
+
+static int mv88e6xxx_smi_direct_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ int ret;
+
+ ret = mdiobus_read_nested(chip->bus, dev, reg);
+ if (ret < 0)
+ return ret;
+
+ *data = ret & 0xffff;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_direct_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ int ret;
+
+ ret = mdiobus_write_nested(chip->bus, dev, reg, data);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int mv88e6xxx_smi_direct_wait(struct mv88e6xxx_chip *chip,
+ int dev, int reg, int bit, int val)
+{
+ u16 data;
+ int err;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ err = mv88e6xxx_smi_direct_read(chip, dev, reg, &data);
+ if (err)
+ return err;
+
+ if (!!(data >> bit) == !!val)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_direct_ops = {
+ .read = mv88e6xxx_smi_direct_read,
+ .write = mv88e6xxx_smi_direct_write,
+};
+
+/* Offset 0x00: SMI Command Register
+ * Offset 0x01: SMI Data Register
+ */
+
+static int mv88e6xxx_smi_indirect_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ int err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD,
+ MV88E6XXX_SMI_CMD_BUSY |
+ MV88E6XXX_SMI_CMD_MODE_22 |
+ MV88E6XXX_SMI_CMD_OP_22_READ |
+ (dev << 5) | reg);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ return mv88e6xxx_smi_direct_read(chip, chip->sw_addr,
+ MV88E6XXX_SMI_DATA, data);
+}
+
+static int mv88e6xxx_smi_indirect_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_DATA, data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_smi_direct_write(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD,
+ MV88E6XXX_SMI_CMD_BUSY |
+ MV88E6XXX_SMI_CMD_MODE_22 |
+ MV88E6XXX_SMI_CMD_OP_22_WRITE |
+ (dev << 5) | reg);
+ if (err)
+ return err;
+
+ return mv88e6xxx_smi_direct_wait(chip, chip->sw_addr,
+ MV88E6XXX_SMI_CMD, 15, 0);
+}
+
+static const struct mv88e6xxx_bus_ops mv88e6xxx_smi_indirect_ops = {
+ .read = mv88e6xxx_smi_indirect_read,
+ .write = mv88e6xxx_smi_indirect_write,
+};
+
+int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr)
+{
+ if (sw_addr == 0)
+ chip->smi_ops = &mv88e6xxx_smi_direct_ops;
+ else if (chip->info->multi_chip)
+ chip->smi_ops = &mv88e6xxx_smi_indirect_ops;
+ else
+ return -EINVAL;
+
+ chip->bus = bus;
+ chip->sw_addr = sw_addr;
+
+ return 0;
+}
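
To make the indirect (multi-chip) sequence above concrete, this is how the Clause-22
read command word written to MV88E6XXX_SMI_CMD is assembled, using the bit
definitions from smi.h below. The internal device and register numbers are arbitrary
example values:

	/* Example only: read register 0x03 of internal device 0x10 */
	u16 cmd = MV88E6XXX_SMI_CMD_BUSY |		/* 0x8000 */
		  MV88E6XXX_SMI_CMD_MODE_22 |		/* 0x1000 */
		  MV88E6XXX_SMI_CMD_OP_22_READ |	/* 0x0800 */
		  (0x10 << 5) | 0x03;			/* dev << 5 | reg => 0x9a03 */

The switch clears the BUSY bit once the internal access completes, which is what
mv88e6xxx_smi_direct_wait() polls (bit 15) before the result is fetched from
MV88E6XXX_SMI_DATA.
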
diff --git a/drivers/net/dsa/mv88e6xxx/smi.h b/drivers/net/dsa/mv88e6xxx/smi.h
new file mode 100644
index 000000000000..35e6403b65dc
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/smi.h
@@ -0,0 +1,59 @@
+/*
+ * Marvell 88E6xxx System Management Interface (SMI) support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2019 Vivien Didelot <vivien.didelot@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_SMI_H
+#define _MV88E6XXX_SMI_H
+
+#include "chip.h"
+
+/* Offset 0x00: SMI Command Register */
+#define MV88E6XXX_SMI_CMD 0x00
+#define MV88E6XXX_SMI_CMD_BUSY 0x8000
+#define MV88E6XXX_SMI_CMD_MODE_MASK 0x1000
+#define MV88E6XXX_SMI_CMD_MODE_45 0x0000
+#define MV88E6XXX_SMI_CMD_MODE_22 0x1000
+#define MV88E6XXX_SMI_CMD_OP_MASK 0x0c00
+#define MV88E6XXX_SMI_CMD_OP_22_WRITE 0x0400
+#define MV88E6XXX_SMI_CMD_OP_22_READ 0x0800
+#define MV88E6XXX_SMI_CMD_OP_45_WRITE_ADDR 0x0000
+#define MV88E6XXX_SMI_CMD_OP_45_WRITE_DATA 0x0400
+#define MV88E6XXX_SMI_CMD_OP_45_READ_DATA 0x0800
+#define MV88E6XXX_SMI_CMD_OP_45_READ_DATA_INC 0x0c00
+#define MV88E6XXX_SMI_CMD_DEV_ADDR_MASK 0x003e
+#define MV88E6XXX_SMI_CMD_REG_ADDR_MASK 0x001f
+
+/* Offset 0x01: SMI Data Register */
+#define MV88E6XXX_SMI_DATA 0x01
+
+int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int sw_addr);
+
+static inline int mv88e6xxx_smi_read(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 *data)
+{
+ if (chip->smi_ops && chip->smi_ops->read)
+ return chip->smi_ops->read(chip, dev, reg, data);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_smi_write(struct mv88e6xxx_chip *chip,
+ int dev, int reg, u16 data)
+{
+ if (chip->smi_ops && chip->smi_ops->write)
+ return chip->smi_ops->write(chip, dev, reg, data);
+
+ return -EOPNOTSUPP;
+}
+
+#endif /* _MV88E6XXX_SMI_H */
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
new file mode 100644
index 000000000000..757751a89819
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -0,0 +1,17 @@
+config NET_DSA_SJA1105
+tristate "NXP SJA1105 Ethernet switch family support"
+ depends on NET_DSA && SPI
+ select NET_DSA_TAG_SJA1105
+ select PACKING
+ select CRC32
+ help
+ This is the driver for the NXP SJA1105 automotive Ethernet switch
+ family. These are 5-port devices and are managed over an SPI
+ interface. Probing is handled based on OF bindings and so is the
+ linkage to phylib. The driver supports the following revisions:
+ - SJA1105E (Gen. 1, No TT-Ethernet)
+ - SJA1105T (Gen. 1, TT-Ethernet)
+ - SJA1105P (Gen. 2, No SGMII, No TT-Ethernet)
+ - SJA1105Q (Gen. 2, No SGMII, TT-Ethernet)
+ - SJA1105R (Gen. 2, SGMII, No TT-Ethernet)
+ - SJA1105S (Gen. 2, SGMII, TT-Ethernet)
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
new file mode 100644
index 000000000000..1c2b55fec959
--- /dev/null
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
+
+sja1105-objs := \
+ sja1105_spi.o \
+ sja1105_main.o \
+ sja1105_ethtool.o \
+ sja1105_clocking.o \
+ sja1105_static_config.o \
+ sja1105_dynamic_config.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
new file mode 100644
index 000000000000..b043bfc408f2
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_H
+#define _SJA1105_H
+
+#include <linux/dsa/sja1105.h>
+#include <net/dsa.h>
+#include <linux/mutex.h>
+#include "sja1105_static_config.h"
+
+#define SJA1105_NUM_PORTS 5
+#define SJA1105_NUM_TC 8
+#define SJA1105ET_FDB_BIN_SIZE 4
+/* The hardware value is in multiples of 10 ms.
+ * The passed parameter is in multiples of 1 ms.
+ */
+#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
+
+/* Keeps the different addresses between E/T and P/Q/R/S */
+struct sja1105_regs {
+ u64 device_id;
+ u64 prod_id;
+ u64 status;
+ u64 port_control;
+ u64 rgu;
+ u64 config;
+ u64 rmii_pll1;
+ u64 pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_NUM_PORTS];
+ u64 rgmii_pad_mii_tx[SJA1105_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_NUM_PORTS];
+ u64 mac[SJA1105_NUM_PORTS];
+ u64 mac_hl1[SJA1105_NUM_PORTS];
+ u64 mac_hl2[SJA1105_NUM_PORTS];
+ u64 qlevel[SJA1105_NUM_PORTS];
+};
+
+struct sja1105_info {
+ u64 device_id;
+ /* Needed for distinction between P and R, and between Q and S
+ * (since the parts with/without SGMII share the same
+ * switch core and device_id)
+ */
+ u64 part_no;
+ const struct sja1105_dynamic_table_ops *dyn_ops;
+ const struct sja1105_table_ops *static_ops;
+ const struct sja1105_regs *regs;
+ int (*reset_cmd)(const void *ctx, const void *data);
+ int (*setup_rgmii_delay)(const void *ctx, int port);
+ const char *name;
+};
+
+struct sja1105_private {
+ struct sja1105_static_config static_config;
+ bool rgmii_rx_delay[SJA1105_NUM_PORTS];
+ bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ const struct sja1105_info *info;
+ struct gpio_desc *reset_gpio;
+ struct spi_device *spidev;
+ struct dsa_switch *ds;
+ struct sja1105_port ports[SJA1105_NUM_PORTS];
+ /* Serializes transmission of management frames so that
+ * the switch doesn't confuse them with one another.
+ */
+ struct mutex mgmt_lock;
+};
+
+#include "sja1105_dynamic_config.h"
+
+struct sja1105_spi_message {
+ u64 access;
+ u64 read_count;
+ u64 address;
+};
+
+typedef enum {
+ SPI_READ = 0,
+ SPI_WRITE = 1,
+} sja1105_spi_rw_mode_t;
+
+/* From sja1105_spi.c */
+int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ void *packed_buf, size_t size_bytes);
+int sja1105_spi_send_int(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u64 *value, u64 size_bytes);
+int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 base_addr,
+ void *packed_buf, u64 buf_len);
+int sja1105_static_config_upload(struct sja1105_private *priv);
+
+extern struct sja1105_info sja1105e_info;
+extern struct sja1105_info sja1105t_info;
+extern struct sja1105_info sja1105p_info;
+extern struct sja1105_info sja1105q_info;
+extern struct sja1105_info sja1105r_info;
+extern struct sja1105_info sja1105s_info;
+
+/* From sja1105_clocking.c */
+
+typedef enum {
+ XMII_MAC = 0,
+ XMII_PHY = 1,
+} sja1105_mii_role_t;
+
+typedef enum {
+ XMII_MODE_MII = 0,
+ XMII_MODE_RMII = 1,
+ XMII_MODE_RGMII = 2,
+} sja1105_phy_interface_t;
+
+typedef enum {
+ SJA1105_SPEED_10MBPS = 3,
+ SJA1105_SPEED_100MBPS = 2,
+ SJA1105_SPEED_1000MBPS = 1,
+ SJA1105_SPEED_AUTO = 0,
+} sja1105_speed_t;
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
+int sja1105_clocking_setup(struct sja1105_private *priv);
+
+/* From sja1105_ethtool.c */
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data);
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset);
+
+/* From sja1105_dynamic_config.c */
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry);
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep);
+
+u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
+
+/* Common implementations for the static and dynamic configs */
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+
+#endif
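
As a usage sketch for the SPI accessors declared above, reading a single 32-bit
register such as the device ID could look like the snippet below. The real call
sites live in sja1105_spi.c and sja1105_main.c, which are not part of this hunk, so
this is illustrative only:

	u64 device_id;
	int rc;

	rc = sja1105_spi_send_int(priv, SPI_READ, priv->info->regs->device_id,
				  &device_id, 4);
	if (rc < 0)
		dev_err(priv->ds->dev, "failed to read device id: %d\n", rc);
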
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
new file mode 100644
index 000000000000..94bfe0ee50a8
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -0,0 +1,601 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_CGU_CMD 4
+
+struct sja1105_cfg_pad_mii_tx {
+ u64 d32_os;
+ u64 d32_ipud;
+ u64 d10_os;
+ u64 d10_ipud;
+ u64 ctrl_os;
+ u64 ctrl_ipud;
+ u64 clk_os;
+ u64 clk_ih;
+ u64 clk_ipud;
+};
+
+/* UM10944 Table 82.
+ * IDIV_0_C to IDIV_4_C control registers
+ * (addr. 10000Bh to 10000Fh)
+ */
+struct sja1105_cgu_idiv {
+ u64 clksrc;
+ u64 autoblock;
+ u64 idiv;
+ u64 pd;
+};
+
+/* PLL_1_C control register
+ *
+ * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
+ * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
+ */
+struct sja1105_cgu_pll_ctrl {
+ u64 pllclksrc;
+ u64 msel;
+ u64 autoblock;
+ u64 psel;
+ u64 direct;
+ u64 fbsel;
+ u64 bypass;
+ u64 pd;
+};
+
+enum {
+ CLKSRC_MII0_TX_CLK = 0x00,
+ CLKSRC_MII0_RX_CLK = 0x01,
+ CLKSRC_MII1_TX_CLK = 0x02,
+ CLKSRC_MII1_RX_CLK = 0x03,
+ CLKSRC_MII2_TX_CLK = 0x04,
+ CLKSRC_MII2_RX_CLK = 0x05,
+ CLKSRC_MII3_TX_CLK = 0x06,
+ CLKSRC_MII3_RX_CLK = 0x07,
+ CLKSRC_MII4_TX_CLK = 0x08,
+ CLKSRC_MII4_RX_CLK = 0x09,
+ CLKSRC_PLL0 = 0x0B,
+ CLKSRC_PLL1 = 0x0E,
+ CLKSRC_IDIV0 = 0x11,
+ CLKSRC_IDIV1 = 0x12,
+ CLKSRC_IDIV2 = 0x13,
+ CLKSRC_IDIV3 = 0x14,
+ CLKSRC_IDIV4 = 0x15,
+};
+
+/* UM10944 Table 83.
+ * MIIx clock control registers 1 to 30
+ * (addresses 100013h to 100035h)
+ */
+struct sja1105_cgu_mii_ctrl {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
+static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
+ sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
+ bool enabled, int factor)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ struct sja1105_cgu_idiv idiv;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ if (enabled && factor != 1 && factor != 10) {
+ dev_err(dev, "idiv factor must be 1 or 10\n");
+ return -ERANGE;
+ }
+
+ /* Payload for packed_buf */
+ idiv.clksrc = 0x0A; /* 25MHz */
+ idiv.autoblock = 1; /* Block clk automatically */
+ idiv.idiv = factor - 1; /* Divide by 1 or 10 */
+ idiv.pd = enabled ? 0 : 1; /* Power down? */
+ sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->cgu_idiv[port], packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+}
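
As a cross-check of the IDIV_x_C layout packed above (clksrc in bits 28:24,
autoblock in bit 11, idiv in bits 5:2, pd in bit 0), the register value for an
enabled divider with factor 10 (the 10 Mbps case used later in this file) works out
to:

	/* clksrc    = 0x0A (25 MHz)   -> 0x0A000000
	 * autoblock = 1               -> 0x00000800
	 * idiv      = factor - 1 = 9  -> 0x00000024
	 * pd        = 0 (enabled)     -> 0x00000000
	 *                                ----------
	 * IDIV_x_C register value      = 0x0A000824
	 */

How that value is serialized into packed_buf is up to the lib/packing helpers, so
only the resulting register value is shown here.
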
+
+static void
+sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_mii_role_t role)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_tx_clk;
+ const int mac_clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+ const int phy_clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (role == XMII_MAC)
+ clksrc = mac_clk_sources[port];
+ else
+ clksrc = phy_clk_sources[port];
+
+ /* Payload for packed_buf */
+ mii_tx_clk.clksrc = clksrc;
+ mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_tx_clk[port], packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_RX_CLK,
+ CLKSRC_MII1_RX_CLK,
+ CLKSRC_MII2_RX_CLK,
+ CLKSRC_MII3_RX_CLK,
+ CLKSRC_MII4_RX_CLK,
+ };
+
+ /* Payload for packed_buf */
+ mii_rx_clk.clksrc = clk_sources[port];
+ mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_rx_clk[port], packed_buf,
+ SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ /* Payload for packed_buf */
+ mii_ext_tx_clk.clksrc = clk_sources[port];
+ mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_IDIV0,
+ CLKSRC_IDIV1,
+ CLKSRC_IDIV2,
+ CLKSRC_IDIV3,
+ CLKSRC_IDIV4,
+ };
+
+ /* Payload for packed_buf */
+ mii_ext_rx_clk.clksrc = clk_sources[port];
+ mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->mii_ext_rx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring MII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* If role is MAC, disable IDIV
+ * If role is PHY, enable IDIV and configure for 1/1 divider
+ */
+ rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_TX_CLK_n
+ * * If role is MAC, select TX_CLK_n
+ * * If role is PHY, select IDIV_n
+ */
+ rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of MII_RX_CLK_n
+ * Select RX_CLK_n
+ */
+ rc = sja1105_cgu_mii_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ if (role == XMII_PHY) {
+ /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
+
+ /* Configure CLKSRC of EXT_TX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+
+ /* Configure CLKSRC of EXT_RX_CLK_n
+ * Select IDIV_n
+ */
+ rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+static void
+sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
+ sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
+ sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
+ sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
+ sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
+}
+
+static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
+ int port, sja1105_speed_t speed)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl txc;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ int clksrc;
+
+ if (speed == SJA1105_SPEED_1000MBPS) {
+ clksrc = CLKSRC_PLL0;
+ } else {
+ int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
+ CLKSRC_IDIV3, CLKSRC_IDIV4};
+ clksrc = clk_sources[port];
+ }
+
+ /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
+ txc.clksrc = clksrc;
+ /* Autoblock clk while changing clksrc */
+ txc.autoblock = 1;
+ /* Power Down off => enabled */
+ txc.pd = 0;
+ sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rgmii_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+/* AGU */
+static void
+sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
+ sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
+ sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
+ sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
+ sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
+ sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
+ sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
+ sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
+ sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
+}
+
+static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_tx pad_mii_tx;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload */
+ pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
+ /* high noise/high speed */
+ pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
+ /* plain input (default) */
+ pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
+ pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
+ pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
+ pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
+ pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
+ sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rgmii_pad_mii_tx[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port)
+{
+ struct device *dev = priv->ds->dev;
+ struct sja1105_mac_config_entry *mac;
+ sja1105_speed_t speed;
+ int rc;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+ speed = mac[port].speed;
+
+ dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
+ port, speed);
+
+ switch (speed) {
+ case SJA1105_SPEED_1000MBPS:
+ /* 1000Mbps, IDIV disabled (125 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ break;
+ case SJA1105_SPEED_100MBPS:
+ /* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 1);
+ break;
+ case SJA1105_SPEED_10MBPS:
+ /* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
+ rc = sja1105_cgu_idiv_config(priv, port, true, 10);
+ break;
+ case SJA1105_SPEED_AUTO:
+ /* Skip CGU configuration if there is no speed available
+ * (e.g. link is not established yet)
+ */
+ dev_dbg(dev, "Speed not available, skipping CGU config\n");
+ return 0;
+ default:
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure idiv\n");
+ return rc;
+ }
+ rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure RGMII Tx clock\n");
+ return rc;
+ }
+ rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
+ if (rc < 0) {
+ dev_err(dev, "Failed to configure Tx pad registers\n");
+ return rc;
+ }
+ if (!priv->info->setup_rgmii_delay)
+ return 0;
+
+ return priv->info->setup_rgmii_delay(priv, port);
+}
+
+static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ref_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ const int clk_sources[] = {
+ CLKSRC_MII0_TX_CLK,
+ CLKSRC_MII1_TX_CLK,
+ CLKSRC_MII2_TX_CLK,
+ CLKSRC_MII3_TX_CLK,
+ CLKSRC_MII4_TX_CLK,
+ };
+
+ /* Payload for packed_buf */
+ ref_clk.clksrc = clk_sources[port];
+ ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ref_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rmii_ref_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int
+sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cgu_mii_ctrl ext_tx_clk;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ /* Payload for packed_buf */
+ ext_tx_clk.clksrc = CLKSRC_PLL1;
+ ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
+ ext_tx_clk.pd = 0; /* Power Down off => enabled */
+ sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE,
+ regs->rmii_ext_tx_clk[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
+static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1105_cgu_pll_ctrl pll = {0};
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ /* PLL1 must be enabled and output 50 MHz.
+ * This is done by writing first 0x0A010941 to
+ * the PLL_1_C register and then deasserting
+ * power down (PD) 0x0A010940.
+ */
+
+ /* Step 1: PLL1 setup for 50 MHz */
+ pll.pllclksrc = 0xA;
+ pll.msel = 0x1;
+ pll.autoblock = 0x1;
+ pll.psel = 0x1;
+ pll.direct = 0x0;
+ pll.fbsel = 0x1;
+ pll.bypass = 0x0;
+ pll.pd = 0x1;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to configure PLL1 for 50MHz\n");
+ return rc;
+ }
+
+ /* Step 2: Enable PLL1 */
+ pll.pd = 0x0;
+
+ sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rmii_pll1,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+ if (rc < 0) {
+ dev_err(dev, "failed to enable PLL1\n");
+ return rc;
+ }
+ return rc;
+}
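
The two magic numbers quoted in the comment can be reproduced from the field values
and the bit positions in sja1105_cgu_pll_control_packing():

	/* Step 1 (pd = 1): 0x0A010941
	 *   pllclksrc = 0xA  (bits 28:24) -> 0x0A000000
	 *   msel      = 0x1  (bits 23:16) -> 0x00010000
	 *   autoblock = 1    (bit 11)     -> 0x00000800
	 *   psel      = 0x1  (bits 9:8)   -> 0x00000100
	 *   fbsel     = 1    (bit 6)      -> 0x00000040
	 *   pd        = 1    (bit 0)      -> 0x00000001
	 * Step 2 clears only pd, giving 0x0A010940.
	 */
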
+
+static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
+ sja1105_mii_role_t role)
+{
+ struct device *dev = priv->ds->dev;
+ int rc;
+
+ dev_dbg(dev, "Configuring RMII-%s clocking\n",
+ (role == XMII_MAC) ? "MAC" : "PHY");
+ /* AH1601.pdf chapter 2.5.1. Sources */
+ if (role == XMII_MAC) {
+ /* Configure and enable PLL1 for 50 MHz output */
+ rc = sja1105_cgu_rmii_pll_config(priv);
+ if (rc < 0)
+ return rc;
+ }
+ /* Disable IDIV for this port */
+ rc = sja1105_cgu_idiv_config(priv, port, false, 1);
+ if (rc < 0)
+ return rc;
+ /* Source to sink mappings */
+ rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ if (role == XMII_MAC) {
+ rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
+
+int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
+{
+ struct sja1105_xmii_params_entry *mii;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* RGMII etc */
+ phy_mode = mii->xmii_mode[port];
+ /* MAC or PHY, for applicable types (not RGMII) */
+ role = mii->phy_mac[port];
+
+ switch (phy_mode) {
+ case XMII_MODE_MII:
+ rc = sja1105_mii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RMII:
+ rc = sja1105_rmii_clocking_setup(priv, port, role);
+ break;
+ case XMII_MODE_RGMII:
+ rc = sja1105_rgmii_clocking_setup(priv, port);
+ break;
+ default:
+ dev_err(dev, "Invalid interface mode specified: %d\n",
+ phy_mode);
+ return -EINVAL;
+ }
+ if (rc)
+ dev_err(dev, "Clocking setup for port %d failed: %d\n",
+ port, rc);
+ return rc;
+}
+
+int sja1105_clocking_setup(struct sja1105_private *priv)
+{
+ int port, rc;
+
+ for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ rc = sja1105_clocking_setup_port(priv, port);
+ if (rc < 0)
+ return rc;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
new file mode 100644
index 000000000000..e73ab28bf632
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_SIZE_DYN_CMD 4
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+
+#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
+
+#define SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY)
+
+#define SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY)
+
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
+ SJA1105_SIZE_DYN_CMD
+
+#define SJA1105_MAX_DYN_CMD_SIZE \
+ SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD
+
+static void
+sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - The hardware takes the 'index' field within
+ * struct sja1105_l2_lookup_entry as the index on which this command
+ * will operate. However it will ignore everything else, so 'index'
+ * is logically part of command but physically part of entry.
+ * Populate the 'index' entry field from within the command callback,
+ * such that our API doesn't need to ask for a full-blown entry
+ * structure when e.g. a delete is requested.
+ */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY, op);
+ /* TODO hostcmd */
+}
+
+static void
+sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above. */
+ sja1105_packing(buf, &cmd->index, 29, 20,
+ SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
+}
+
+static void
+sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ u64 mgmtroute = 1;
+
+ sja1105et_l2_lookup_cmd_packing(buf, cmd, op);
+ if (op == PACK)
+ sja1105_pack(p, &mgmtroute, 26, 26, SJA1105_SIZE_DYN_CMD);
+}
+
+static size_t sja1105et_mgmt_route_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_mgmt_entry *entry = entry_ptr;
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+
+ /* UM10944: To specify if a PTP egress timestamp shall be captured on
+ * each port upon transmission of the frame, the LSB of VLANID in the
+ * ENTRY field provided by the host must be set.
+ * Bit 1 of VLANID then specifies the register where the timestamp for
+ * this port is stored in.
+ */
+ sja1105_packing(buf, &entry->tsreg, 85, 85, size, op);
+ sja1105_packing(buf, &entry->takets, 84, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ return size;
+}
+
+/* In E/T, entry is at addresses 0x27-0x28. There is a 4 byte gap at 0x29,
+ * and command is at 0x2a. Similarly in P/Q/R/S there is a 1 register gap
+ * between entry (0x2d, 0x2e) and command (0x30).
+ */
+static void
+sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VLAN_LOOKUP_ENTRY + 4;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->valident, 27, 27, size, op);
+ /* Hack - see comments above, applied for 'vlanid' field of
+ * struct sja1105_vlan_lookup_entry.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
+}
+
+static void
+sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
+sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_DYN_CMD;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+
+ sja1105_packing(reg1, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(reg1, &cmd->index, 26, 24, size, op);
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ /* Yup, user manual definitions are reversed */
+ u8 *reg1 = buf + 4;
+ u8 *reg2 = buf;
+
+ sja1105_packing(reg1, &entry->speed, 30, 29, size, op);
+ sja1105_packing(reg1, &entry->drpdtag, 23, 23, size, op);
+ sja1105_packing(reg1, &entry->drpuntag, 22, 22, size, op);
+ sja1105_packing(reg1, &entry->retag, 21, 21, size, op);
+ sja1105_packing(reg1, &entry->dyn_learn, 20, 20, size, op);
+ sja1105_packing(reg1, &entry->egress, 19, 19, size, op);
+ sja1105_packing(reg1, &entry->ingress, 18, 18, size, op);
+ sja1105_packing(reg1, &entry->ing_mirr, 17, 17, size, op);
+ sja1105_packing(reg1, &entry->egr_mirr, 16, 16, size, op);
+ sja1105_packing(reg1, &entry->vlanprio, 14, 12, size, op);
+ sja1105_packing(reg1, &entry->vlanid, 11, 0, size, op);
+ sja1105_packing(reg2, &entry->tp_delin, 31, 16, size, op);
+ sja1105_packing(reg2, &entry->tp_delout, 15, 0, size, op);
+ /* MAC configuration table entries which can't be reconfigured:
+ * top, base, enabled, ifg, maxage, drpnona664
+ */
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY;
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 2, 0, size, op);
+}
+
+static void
+sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ sja1105_packing(buf, &cmd->valid, 31, 31,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->poly, 7, 0,
+ SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+static void
+sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(buf, &cmd->errors, 30, 30, size, op);
+}
+
+static size_t
+sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const int size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD;
+
+ sja1105_packing(buf, &entry->mirr_port, 2, 0, size, op);
+ /* Bogus return value, not used anywhere */
+ return 0;
+}
+
+#define OP_READ BIT(0)
+#define OP_WRITE BIT(1)
+#define OP_DEL BIT(2)
+
+/* SJA1105E/T: First generation */
+struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105et_l2_lookup_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_MGMT_ROUTE] = {
+ .entry_packing = sja1105et_mgmt_route_entry_packing,
+ .cmd_packing = sja1105et_mgmt_route_cmd_packing,
+ .access = (OP_READ | OP_WRITE),
+ .max_entry_count = SJA1105_NUM_PORTS,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x20,
+ },
+ [BLK_IDX_L2_POLICING] = {0},
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x27,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105et_mac_config_entry_packing,
+ .cmd_packing = sja1105et_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x36,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {0},
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_XMII_PARAMS] = {0},
+};
+
+/* SJA1105P/Q/R/S: Second generation: TODO */
+struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1105pqrs_l2_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = 0x24,
+ },
+ [BLK_IDX_L2_POLICING] = {0},
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1105_vlan_lookup_entry_packing,
+ .cmd_packing = sja1105_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = 0x2D,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1105_l2_forwarding_entry_packing,
+ .cmd_packing = sja1105_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = 0x2A,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1105pqrs_mac_config_entry_packing,
+ .cmd_packing = sja1105pqrs_mac_config_cmd_packing,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = 0x4B,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1105et_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1105et_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE),
+ .packed_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = 0x38,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {0},
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1105et_general_params_entry_packing,
+ .cmd_packing = sja1105et_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = OP_WRITE,
+ .packed_size = SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = 0x34,
+ },
+ [BLK_IDX_XMII_PARAMS] = {0},
+};
+
+int sja1105_dynamic_config_read(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int retries = 3;
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (!(ops->access & OP_READ))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_READ; /* Action is read */
+ cmd.index = index;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ /* Send SPI write operation: read config table entry */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
+ packed_buf, ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ /* Loop until we have confirmation that hardware has finished
+ * processing the command and has cleared the VALID field
+ */
+ do {
+ memset(packed_buf, 0, ops->packed_size);
+
+ /* Retrieve the read operation's result */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, ops->addr,
+ packed_buf, ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ cmd = (struct sja1105_dyn_cmd) {0};
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ /* UM10944: [valident] will always be found cleared
+ * during a read access with MGMTROUTE set.
+ * So don't error out in that case.
+ */
+ if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
+ return -EINVAL;
+ cpu_relax();
+ } while (cmd.valid && --retries);
+
+ if (cmd.valid)
+ return -ETIMEDOUT;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+ return 0;
+}
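
A hypothetical usage sketch of the read path. The sja1105_l2_lookup_entry layout and
its destports field come from sja1105_static_config.h, which is added elsewhere in
this series and not shown in this hunk:

	struct sja1105_l2_lookup_entry l2_lookup = {0};
	int rc;

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP, index,
					 &l2_lookup);
	if (rc == 0)
		dev_dbg(priv->ds->dev, "FDB slot %d holds destports 0x%llx\n",
			index, l2_lookup.destports);
	else if (rc == -EINVAL)
		dev_dbg(priv->ds->dev, "FDB slot %d is free\n", index);
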
+
+int sja1105_dynamic_config_write(struct sja1105_private *priv,
+ enum sja1105_blk_idx blk_idx,
+ int index, void *entry, bool keep)
+{
+ const struct sja1105_dynamic_table_ops *ops;
+ struct sja1105_dyn_cmd cmd = {0};
+ /* SPI payload buffer */
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
+ int rc;
+
+ if (blk_idx >= BLK_IDX_MAX_DYN)
+ return -ERANGE;
+
+ ops = &priv->info->dyn_ops[blk_idx];
+
+ if (index >= ops->max_entry_count)
+ return -ERANGE;
+ if (!(ops->access & OP_WRITE))
+ return -EOPNOTSUPP;
+ if (!keep && !(ops->access & OP_DEL))
+ return -EOPNOTSUPP;
+ if (ops->packed_size > SJA1105_MAX_DYN_CMD_SIZE)
+ return -ERANGE;
+
+ cmd.valident = keep; /* If false, deletes entry */
+ cmd.valid = true; /* Trigger action on table entry */
+ cmd.rdwrset = SPI_WRITE; /* Action is write */
+ cmd.index = index;
+
+ if (!ops->cmd_packing)
+ return -EOPNOTSUPP;
+ ops->cmd_packing(packed_buf, &cmd, PACK);
+
+ if (!ops->entry_packing)
+ return -EOPNOTSUPP;
+ /* Don't dereference potentially NULL pointer if just
+ * deleting a table entry is what was requested. For cases
+ * where 'index' field is physically part of entry structure,
+ * and needed here, we deal with that in the cmd_packing callback.
+ */
+ if (keep)
+ ops->entry_packing(packed_buf, entry, PACK);
+
+ /* Send SPI write operation: write config table entry */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_WRITE, ops->addr,
+ packed_buf, ops->packed_size);
+ if (rc < 0)
+ return rc;
+
+ cmd = (struct sja1105_dyn_cmd) {0};
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
+ if (cmd.errors)
+ return -EINVAL;
+
+ return 0;
+}
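
The corresponding write path, where keep = false clears VALIDENT and turns the
operation into a delete (hence the OP_DEL check above). Again a sketch;
struct sja1105_vlan_lookup_entry is defined in the static config code outside this
hunk:

	struct sja1105_vlan_lookup_entry vlan = { .vlanid = 100 };
	int rc;

	/* Install (or update) VLAN 100 */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, 100,
					  &vlan, true);
	if (rc)
		return rc;

	/* Later, delete it again */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, 100,
					  &vlan, false);
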
+
+static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if ((crc ^ byte) & (1 << 7)) {
+ crc <<= 1;
+ crc ^= poly;
+ } else {
+ crc <<= 1;
+ }
+ byte <<= 1;
+ }
+ return crc;
+}
+
+/* CRC8 algorithm with non-reversed input, non-reversed output,
+ * no input xor and no output xor. Code customized for receiving
+ * the SJA1105 E/T FDB keys (vlanid, macaddr) as input. CRC polynomial
+ * is also received as argument in the Koopman notation that the switch
+ * hardware stores it in.
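+ * For example, the default 0x97 poly programmed by this driver
+ * (x^8 + x^5 + x^3 + x^2 + x + 1) becomes ((0x97 << 1) + 1) & 0xff =
+ * 0x2f in the 'normal', implicit-x^8 notation used below.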
+ */
+u8 sja1105_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params =
+ priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS].entries;
+ u64 poly_koopman = l2_lookup_params->poly;
+ /* Convert polynomial from Koopman to 'normal' notation */
+ u8 poly = (u8)(1 + (poly_koopman << 1));
+ u64 vlanid = l2_lookup_params->shared_learn ? 0 : vid;
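+	/* Hash input layout: VLAN ID in bits 59:48, MAC address in bits
+	 * 47:0. E.g. for vid 1 and a MAC of 00:04:9f:05:de:ad, the hashed
+	 * quantity is 0x000100049f05dead.
+	 */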
+ u64 input = (vlanid << 48) | ether_addr_to_u64(addr);
+ u8 crc = 0; /* seed */
+ int i;
+
+ /* Mask the eight bytes starting from MSB one at a time */
+ for (i = 56; i >= 0; i -= 8) {
+ u8 byte = (input & (0xffull << i)) >> i;
+
+ crc = sja1105_crc8_add(crc, byte, poly);
+ }
+ return crc;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
new file mode 100644
index 000000000000..77be59546a55
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_DYNAMIC_CONFIG_H
+#define _SJA1105_DYNAMIC_CONFIG_H
+
+#include "sja1105.h"
+#include <linux/packing.h>
+
+struct sja1105_dyn_cmd {
+ u64 valid;
+ u64 rdwrset;
+ u64 errors;
+ u64 valident;
+ u64 index;
+};
+
+struct sja1105_dynamic_table_ops {
+	/* This returns size_t just to keep the same prototype as the
+	 * static config ops, some of whose functions we are reusing.
+	 */
+ size_t (*entry_packing)(void *buf, void *entry_ptr, enum packing_op op);
+ void (*cmd_packing)(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op);
+ size_t max_entry_count;
+ size_t packed_size;
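+	/* SPI address of the dynamic reconfiguration area of this table,
+	 * and the OP_READ/OP_WRITE/OP_DEL operations it supports.
+	 */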
+ u64 addr;
+ u8 access;
+};
+
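+/* Unpacked layout of a management route entry (BLK_IDX_MGMT_ROUTE),
+ * used by sja1105_mgmt_xmit() to steer frames matching @macaddr
+ * towards @destports.
+ */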
+struct sja1105_mgmt_entry {
+ u64 tsreg;
+ u64 takets;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+extern struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
+extern struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+
+#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_ethtool.c b/drivers/net/dsa/sja1105/sja1105_ethtool.c
new file mode 100644
index 000000000000..ab581a28cd41
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_ethtool.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105.h"
+
+#define SJA1105_SIZE_MAC_AREA (0x02 * 4)
+#define SJA1105_SIZE_HL1_AREA (0x10 * 4)
+#define SJA1105_SIZE_HL2_AREA (0x4 * 4)
+#define SJA1105_SIZE_QLEVEL_AREA (0x8 * 4) /* 0x4 to 0xB */
+
+struct sja1105_port_status_mac {
+ u64 n_runt;
+ u64 n_soferr;
+ u64 n_alignerr;
+ u64 n_miierr;
+ u64 typeerr;
+ u64 sizeerr;
+ u64 tctimeout;
+ u64 priorerr;
+ u64 nomaster;
+ u64 memov;
+ u64 memerr;
+ u64 invtyp;
+ u64 intcyov;
+ u64 domerr;
+ u64 pcfbagdrop;
+ u64 spcprior;
+ u64 ageprior;
+ u64 portdrop;
+ u64 lendrop;
+ u64 bagdrop;
+ u64 policeerr;
+ u64 drpnona664err;
+ u64 spcerr;
+ u64 agedrp;
+};
+
+struct sja1105_port_status_hl1 {
+ u64 n_n664err;
+ u64 n_vlanerr;
+ u64 n_unreleased;
+ u64 n_sizeerr;
+ u64 n_crcerr;
+ u64 n_vlnotfound;
+ u64 n_ctpolerr;
+ u64 n_polerr;
+ u64 n_rxfrmsh;
+ u64 n_rxfrm;
+ u64 n_rxbytesh;
+ u64 n_rxbyte;
+ u64 n_txfrmsh;
+ u64 n_txfrm;
+ u64 n_txbytesh;
+ u64 n_txbyte;
+};
+
+struct sja1105_port_status_hl2 {
+ u64 n_qfull;
+ u64 n_part_drop;
+ u64 n_egr_disabled;
+ u64 n_not_reach;
+ u64 qlevel_hwm[8]; /* Only for P/Q/R/S */
+ u64 qlevel[8]; /* Only for P/Q/R/S */
+};
+
+struct sja1105_port_status {
+ struct sja1105_port_status_mac mac;
+ struct sja1105_port_status_hl1 hl1;
+ struct sja1105_port_status_hl2 hl2;
+};
+
+static void
+sja1105_port_status_mac_unpack(void *buf,
+ struct sja1105_port_status_mac *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x0, &status->n_runt, 31, 24, 4);
+ sja1105_unpack(p + 0x0, &status->n_soferr, 23, 16, 4);
+ sja1105_unpack(p + 0x0, &status->n_alignerr, 15, 8, 4);
+ sja1105_unpack(p + 0x0, &status->n_miierr, 7, 0, 4);
+ sja1105_unpack(p + 0x1, &status->typeerr, 27, 27, 4);
+ sja1105_unpack(p + 0x1, &status->sizeerr, 26, 26, 4);
+ sja1105_unpack(p + 0x1, &status->tctimeout, 25, 25, 4);
+ sja1105_unpack(p + 0x1, &status->priorerr, 24, 24, 4);
+ sja1105_unpack(p + 0x1, &status->nomaster, 23, 23, 4);
+ sja1105_unpack(p + 0x1, &status->memov, 22, 22, 4);
+ sja1105_unpack(p + 0x1, &status->memerr, 21, 21, 4);
+ sja1105_unpack(p + 0x1, &status->invtyp, 19, 19, 4);
+ sja1105_unpack(p + 0x1, &status->intcyov, 18, 18, 4);
+ sja1105_unpack(p + 0x1, &status->domerr, 17, 17, 4);
+ sja1105_unpack(p + 0x1, &status->pcfbagdrop, 16, 16, 4);
+ sja1105_unpack(p + 0x1, &status->spcprior, 15, 12, 4);
+ sja1105_unpack(p + 0x1, &status->ageprior, 11, 8, 4);
+ sja1105_unpack(p + 0x1, &status->portdrop, 6, 6, 4);
+ sja1105_unpack(p + 0x1, &status->lendrop, 5, 5, 4);
+ sja1105_unpack(p + 0x1, &status->bagdrop, 4, 4, 4);
+ sja1105_unpack(p + 0x1, &status->policeerr, 3, 3, 4);
+ sja1105_unpack(p + 0x1, &status->drpnona664err, 2, 2, 4);
+ sja1105_unpack(p + 0x1, &status->spcerr, 1, 1, 4);
+ sja1105_unpack(p + 0x1, &status->agedrp, 0, 0, 4);
+}
+
+static void
+sja1105_port_status_hl1_unpack(void *buf,
+ struct sja1105_port_status_hl1 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0xF, &status->n_n664err, 31, 0, 4);
+ sja1105_unpack(p + 0xE, &status->n_vlanerr, 31, 0, 4);
+ sja1105_unpack(p + 0xD, &status->n_unreleased, 31, 0, 4);
+ sja1105_unpack(p + 0xC, &status->n_sizeerr, 31, 0, 4);
+ sja1105_unpack(p + 0xB, &status->n_crcerr, 31, 0, 4);
+ sja1105_unpack(p + 0xA, &status->n_vlnotfound, 31, 0, 4);
+ sja1105_unpack(p + 0x9, &status->n_ctpolerr, 31, 0, 4);
+ sja1105_unpack(p + 0x8, &status->n_polerr, 31, 0, 4);
+ sja1105_unpack(p + 0x7, &status->n_rxfrmsh, 31, 0, 4);
+ sja1105_unpack(p + 0x6, &status->n_rxfrm, 31, 0, 4);
+ sja1105_unpack(p + 0x5, &status->n_rxbytesh, 31, 0, 4);
+ sja1105_unpack(p + 0x4, &status->n_rxbyte, 31, 0, 4);
+ sja1105_unpack(p + 0x3, &status->n_txfrmsh, 31, 0, 4);
+ sja1105_unpack(p + 0x2, &status->n_txfrm, 31, 0, 4);
+ sja1105_unpack(p + 0x1, &status->n_txbytesh, 31, 0, 4);
+ sja1105_unpack(p + 0x0, &status->n_txbyte, 31, 0, 4);
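+	/* Fold the 32-bit high counter words into the 64-bit counters */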
+ status->n_rxfrm += status->n_rxfrmsh << 32;
+ status->n_rxbyte += status->n_rxbytesh << 32;
+ status->n_txfrm += status->n_txfrmsh << 32;
+ status->n_txbyte += status->n_txbytesh << 32;
+}
+
+static void
+sja1105_port_status_hl2_unpack(void *buf,
+ struct sja1105_port_status_hl2 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+
+ sja1105_unpack(p + 0x3, &status->n_qfull, 31, 0, 4);
+ sja1105_unpack(p + 0x2, &status->n_part_drop, 31, 0, 4);
+ sja1105_unpack(p + 0x1, &status->n_egr_disabled, 31, 0, 4);
+ sja1105_unpack(p + 0x0, &status->n_not_reach, 31, 0, 4);
+}
+
+static void
+sja1105pqrs_port_status_qlevel_unpack(void *buf,
+ struct sja1105_port_status_hl2 *status)
+{
+ /* Make pointer arithmetic work on 4 bytes */
+ u32 *p = buf;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ sja1105_unpack(p + i, &status->qlevel_hwm[i], 24, 16, 4);
+ sja1105_unpack(p + i, &status->qlevel[i], 8, 0, 4);
+ }
+}
+
+static int sja1105_port_status_get_mac(struct sja1105_private *priv,
+ struct sja1105_port_status_mac *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_MAC_AREA] = {0};
+ int rc;
+
+ /* MAC area */
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac[port],
+ packed_buf, SJA1105_SIZE_MAC_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_mac_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_hl1(struct sja1105_private *priv,
+ struct sja1105_port_status_hl1 *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_HL1_AREA] = {0};
+ int rc;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl1[port],
+ packed_buf, SJA1105_SIZE_HL1_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_hl1_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get_hl2(struct sja1105_private *priv,
+ struct sja1105_port_status_hl2 *status,
+ int port)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[SJA1105_SIZE_QLEVEL_AREA] = {0};
+ int rc;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->mac_hl2[port],
+ packed_buf, SJA1105_SIZE_HL2_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105_port_status_hl2_unpack(packed_buf, status);
+
+ /* Code below is strictly P/Q/R/S specific. */
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return 0;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->qlevel[port],
+ packed_buf, SJA1105_SIZE_QLEVEL_AREA);
+ if (rc < 0)
+ return rc;
+
+ sja1105pqrs_port_status_qlevel_unpack(packed_buf, status);
+
+ return 0;
+}
+
+static int sja1105_port_status_get(struct sja1105_private *priv,
+ struct sja1105_port_status *status,
+ int port)
+{
+ int rc;
+
+ rc = sja1105_port_status_get_mac(priv, &status->mac, port);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_port_status_get_hl1(priv, &status->hl1, port);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_port_status_get_hl2(priv, &status->hl2, port);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static char sja1105_port_stats[][ETH_GSTRING_LEN] = {
+ /* MAC-Level Diagnostic Counters */
+ "n_runt",
+ "n_soferr",
+ "n_alignerr",
+ "n_miierr",
+ /* MAC-Level Diagnostic Flags */
+ "typeerr",
+ "sizeerr",
+ "tctimeout",
+ "priorerr",
+ "nomaster",
+ "memov",
+ "memerr",
+ "invtyp",
+ "intcyov",
+ "domerr",
+ "pcfbagdrop",
+ "spcprior",
+ "ageprior",
+ "portdrop",
+ "lendrop",
+ "bagdrop",
+ "policeerr",
+ "drpnona664err",
+ "spcerr",
+ "agedrp",
+ /* High-Level Diagnostic Counters */
+ "n_n664err",
+ "n_vlanerr",
+ "n_unreleased",
+ "n_sizeerr",
+ "n_crcerr",
+ "n_vlnotfound",
+ "n_ctpolerr",
+ "n_polerr",
+ "n_rxfrm",
+ "n_rxbyte",
+ "n_txfrm",
+ "n_txbyte",
+ "n_qfull",
+ "n_part_drop",
+ "n_egr_disabled",
+ "n_not_reach",
+};
+
+static char sja1105pqrs_extra_port_stats[][ETH_GSTRING_LEN] = {
+ /* Queue Levels */
+ "qlevel_hwm_0",
+ "qlevel_hwm_1",
+ "qlevel_hwm_2",
+ "qlevel_hwm_3",
+ "qlevel_hwm_4",
+ "qlevel_hwm_5",
+ "qlevel_hwm_6",
+ "qlevel_hwm_7",
+ "qlevel_0",
+ "qlevel_1",
+ "qlevel_2",
+ "qlevel_3",
+ "qlevel_4",
+ "qlevel_5",
+ "qlevel_6",
+ "qlevel_7",
+};
+
+void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port_status status;
+ int rc, i, k = 0;
+
+ memset(&status, 0, sizeof(status));
+
+ rc = sja1105_port_status_get(priv, &status, port);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to read port %d counters: %d\n",
+ port, rc);
+ return;
+ }
+ memset(data, 0, ARRAY_SIZE(sja1105_port_stats) * sizeof(u64));
+ data[k++] = status.mac.n_runt;
+ data[k++] = status.mac.n_soferr;
+ data[k++] = status.mac.n_alignerr;
+ data[k++] = status.mac.n_miierr;
+ data[k++] = status.mac.typeerr;
+ data[k++] = status.mac.sizeerr;
+ data[k++] = status.mac.tctimeout;
+ data[k++] = status.mac.priorerr;
+ data[k++] = status.mac.nomaster;
+ data[k++] = status.mac.memov;
+ data[k++] = status.mac.memerr;
+ data[k++] = status.mac.invtyp;
+ data[k++] = status.mac.intcyov;
+ data[k++] = status.mac.domerr;
+ data[k++] = status.mac.pcfbagdrop;
+ data[k++] = status.mac.spcprior;
+ data[k++] = status.mac.ageprior;
+ data[k++] = status.mac.portdrop;
+ data[k++] = status.mac.lendrop;
+ data[k++] = status.mac.bagdrop;
+ data[k++] = status.mac.policeerr;
+ data[k++] = status.mac.drpnona664err;
+ data[k++] = status.mac.spcerr;
+ data[k++] = status.mac.agedrp;
+ data[k++] = status.hl1.n_n664err;
+ data[k++] = status.hl1.n_vlanerr;
+ data[k++] = status.hl1.n_unreleased;
+ data[k++] = status.hl1.n_sizeerr;
+ data[k++] = status.hl1.n_crcerr;
+ data[k++] = status.hl1.n_vlnotfound;
+ data[k++] = status.hl1.n_ctpolerr;
+ data[k++] = status.hl1.n_polerr;
+ data[k++] = status.hl1.n_rxfrm;
+ data[k++] = status.hl1.n_rxbyte;
+ data[k++] = status.hl1.n_txfrm;
+ data[k++] = status.hl1.n_txbyte;
+ data[k++] = status.hl2.n_qfull;
+ data[k++] = status.hl2.n_part_drop;
+ data[k++] = status.hl2.n_egr_disabled;
+ data[k++] = status.hl2.n_not_reach;
+
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return;
+
+ memset(data + k, 0, ARRAY_SIZE(sja1105pqrs_extra_port_stats) *
+ sizeof(u64));
+ for (i = 0; i < 8; i++) {
+ data[k++] = status.hl2.qlevel_hwm[i];
+ data[k++] = status.hl2.qlevel[i];
+ }
+}
+
+void sja1105_get_strings(struct dsa_switch *ds, int port,
+ u32 stringset, u8 *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(sja1105_port_stats); i++) {
+ strlcpy(p, sja1105_port_stats[i], ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (priv->info->device_id == SJA1105E_DEVICE_ID ||
+ priv->info->device_id == SJA1105T_DEVICE_ID)
+ return;
+ for (i = 0; i < ARRAY_SIZE(sja1105pqrs_extra_port_stats); i++) {
+ strlcpy(p, sja1105pqrs_extra_port_stats[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+int sja1105_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ int count = ARRAY_SIZE(sja1105_port_stats);
+ struct sja1105_private *priv = ds->priv;
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ if (priv->info->device_id == SJA1105PR_DEVICE_ID ||
+ priv->info->device_id == SJA1105QS_DEVICE_ID)
+ count += ARRAY_SIZE(sja1105pqrs_extra_port_stats);
+
+ return count;
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
new file mode 100644
index 000000000000..50ff625c85d6
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -0,0 +1,1675 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/spi/spi.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/phylink.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/netdev_features.h>
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_ether.h>
+#include <linux/dsa/8021q.h>
+#include "sja1105.h"
+
+static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
+ unsigned int startup_delay)
+{
+ gpiod_set_value_cansleep(gpio, 1);
+ /* Wait for minimum reset pulse length */
+ msleep(pulse_len);
+ gpiod_set_value_cansleep(gpio, 0);
+ /* Wait until chip is ready after reset */
+ msleep(startup_delay);
+}
+
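+/* Add port @to to, or remove it from, port @from's broadcast domain,
+ * reachable ports mask and flood domain, depending on @allow.
+ */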
+static void
+sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
+ int from, int to, bool allow)
+{
+ if (allow) {
+ l2_fwd[from].bc_domain |= BIT(to);
+ l2_fwd[from].reach_port |= BIT(to);
+ l2_fwd[from].fl_domain |= BIT(to);
+ } else {
+ l2_fwd[from].bc_domain &= ~BIT(to);
+ l2_fwd[from].reach_port &= ~BIT(to);
+ l2_fwd[from].fl_domain &= ~BIT(to);
+ }
+}
+
+/* Structure used to temporarily transport device tree
+ * settings into sja1105_setup
+ */
+struct sja1105_dt_port {
+ phy_interface_t phy_mode;
+ sja1105_mii_role_t role;
+};
+
+static int sja1105_init_mac_settings(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry default_mac = {
+ /* Enable all 8 priority queues on egress.
+		 * Every queue i holds top[i] - base[i] + 1 = 64 frames.
+		 * Together the 8 queues span indices 0 to 511, the
+		 * maximum supported by the hardware.
+ */
+ .top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
+ .base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
+ .enabled = {true, true, true, true, true, true, true, true},
+ /* Keep standard IFG of 12 bytes on egress. */
+ .ifg = 0,
+ /* Always put the MAC speed in automatic mode, where it can be
+ * retrieved from the PHY object through phylib and
+ * sja1105_adjust_port_config.
+ */
+ .speed = SJA1105_SPEED_AUTO,
+ /* No static correction for 1-step 1588 events */
+ .tp_delin = 0,
+ .tp_delout = 0,
+ /* Disable aging for critical TTEthernet traffic */
+ .maxage = 0xFF,
+ /* Internal VLAN (pvid) to apply to untagged ingress */
+ .vlanprio = 0,
+ .vlanid = 0,
+ .ing_mirr = false,
+ .egr_mirr = false,
+ /* Don't drop traffic with other EtherType than ETH_P_IP */
+		/* Don't drop traffic with an EtherType other than ETH_P_IP */
+ /* Don't drop double-tagged traffic */
+ .drpdtag = false,
+ /* Don't drop untagged traffic */
+ .drpuntag = false,
+ /* Don't retag 802.1p (VID 0) traffic with the pvid */
+ .retag = false,
+ /* Disable learning and I/O on user ports by default -
+ * STP will enable it.
+ */
+ .dyn_learn = false,
+ .egress = false,
+ .ingress = false,
+ };
+ struct sja1105_mac_config_entry *mac;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];
+
+ /* Discard previous MAC Configuration Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_NUM_PORTS,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on phylib DT bindings */
+ table->entry_count = SJA1105_NUM_PORTS;
+
+ mac = table->entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ mac[i] = default_mac;
+ if (i == dsa_upstream_port(priv->ds, i)) {
+ /* STP doesn't get called for CPU port, so we need to
+ * set the I/O parameters statically.
+ */
+ mac[i].dyn_learn = true;
+ mac[i].ingress = true;
+ mac[i].egress = true;
+ }
+ }
+
+ return 0;
+}
+
+static int sja1105_init_mii_settings(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_xmii_params_entry *mii;
+ struct sja1105_table *table;
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
+
+ /* Discard previous xMII Mode Parameters Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ /* Override table based on phylib DT bindings */
+ table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
+
+ mii = table->entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ switch (ports[i].phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ mii->xmii_mode[i] = XMII_MODE_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mii->xmii_mode[i] = XMII_MODE_RGMII;
+ break;
+		default:
+			dev_err(dev, "Unsupported PHY mode %s!\n",
+				phy_modes(ports[i].phy_mode));
+			return -EINVAL;
+		}
+
+ mii->phy_mac[i] = ports[i].role;
+ }
+ return 0;
+}
+
+static int sja1105_init_static_fdb(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
+
+ /* We only populate the FDB table through dynamic
+ * L2 Address Lookup entries
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+ return 0;
+}
+
+static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
+ /* Learned FDB entries are forgotten after 300 seconds */
+ .maxage = SJA1105_AGEING_TIME_MS(300000),
+ /* All entries within a FDB bin are available for learning */
+ .dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
+ /* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
+ .poly = 0x97,
+ /* This selects between Independent VLAN Learning (IVL) and
+ * Shared VLAN Learning (SVL)
+ */
+ .shared_learn = false,
+ /* Don't discard management traffic based on ENFPORT -
+ * we don't perform SMAC port enforcement anyway, so
+ * what we are setting here doesn't matter.
+ */
+ .no_enf_hostprt = false,
+ /* Don't learn SMAC for mac_fltres1 and mac_fltres0.
+ * Maybe correlate with no_linklocal_learn from bridge driver?
+ */
+ .no_mgmt_learn = true,
+ };
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
+ default_l2_lookup_params;
+
+ return 0;
+}
+
+static int sja1105_init_static_vlan(struct sja1105_private *priv)
+{
+ struct sja1105_table *table;
+ struct sja1105_vlan_lookup_entry pvid = {
+ .ving_mirr = 0,
+ .vegr_mirr = 0,
+ .vmemb_port = 0,
+ .vlan_bc = 0,
+ .tag_port = 0,
+ .vlanid = 0,
+ };
+ int i;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ /* The static VLAN table will only contain the initial pvid of 0.
+ * All other VLANs are to be configured through dynamic entries,
+ * and kept in the static configuration table as backing memory.
+ * The pvid of 0 is sufficient to pass traffic while the ports are
+ * standalone and when vlan_filtering is disabled. When filtering
+ * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
+ * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
+ * vlan' even when vlan_filtering is off, but it has no effect.
+ */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(1, table->ops->unpacked_entry_size,
+ GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = 1;
+
+ /* VLAN ID 0: all DT-defined ports are members; no restrictions on
+ * forwarding; always transmit priority-tagged frames as untagged.
+ */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ pvid.vmemb_port |= BIT(i);
+ pvid.vlan_bc |= BIT(i);
+ pvid.tag_port &= ~BIT(i);
+ }
+
+ ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_entry *l2fwd;
+ struct sja1105_table *table;
+ int i, j;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
+
+ l2fwd = table->entries;
+
+ /* First 5 entries define the forwarding rules */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ unsigned int upstream = dsa_upstream_port(priv->ds, i);
+
+ for (j = 0; j < SJA1105_NUM_TC; j++)
+ l2fwd[i].vlan_pmap[j] = j;
+
+ if (i == upstream)
+ continue;
+
+ sja1105_port_allow_traffic(l2fwd, i, upstream, true);
+ sja1105_port_allow_traffic(l2fwd, upstream, i, true);
+ }
+ /* Next 8 entries define VLAN PCP mapping from ingress to egress.
+ * Create a one-to-one mapping.
+ */
+ for (i = 0; i < SJA1105_NUM_TC; i++)
+ for (j = 0; j < SJA1105_NUM_PORTS; j++)
+ l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
+
+ return 0;
+}
+
+static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
+{
+ struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ .max_dynp = 0,
+ /* Use a single memory partition for all ingress queues */
+ .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
+ };
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
+ default_l2fwd_params;
+
+ return 0;
+}
+
+static int sja1105_init_general_params(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry default_general_params = {
+ /* Disallow dynamic changing of the mirror port */
+ .mirr_ptacu = 0,
+ .switchid = priv->ds->index,
+ /* Priority queue for link-local frames trapped to CPU */
+ .hostprio = 0,
+ .mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
+ .mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
+ .incl_srcpt1 = true,
+ .send_meta1 = false,
+ .mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
+ .mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
+ .incl_srcpt0 = true,
+ .send_meta0 = false,
+ /* The destination for traffic matching mac_fltres1 and
+ * mac_fltres0 on all ports except host_port. Such traffic
+		 * received on host_port itself would be dropped, except
+ * by installing a temporary 'management route'
+ */
+ .host_port = dsa_upstream_port(priv->ds, 0),
+ /* Same as host port */
+ .mirr_port = dsa_upstream_port(priv->ds, 0),
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address (presumably because it
+ * is a cascaded port and a downstream SJA switch already did
+ * that). Default to an invalid port (to disable the feature)
+ * and overwrite this if we find any DSA (cascaded) ports.
+ */
+ .casc_port = SJA1105_NUM_PORTS,
+ /* No TTEthernet */
+ .vllupformat = 0,
+ .vlmarker = 0,
+ .vlmask = 0,
+ /* Only update correctionField for 1-step PTP (L2 transport) */
+ .ignore2stf = 0,
+ /* Forcefully disable VLAN filtering by telling
+ * the switch that VLAN has a different EtherType.
+ */
+ .tpid = ETH_P_SJA1105,
+ .tpid2 = ETH_P_SJA1105,
+ };
+ struct sja1105_table *table;
+ int i, k = 0;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ if (dsa_is_dsa_port(priv->ds, i))
+ default_general_params.casc_port = i;
+ else if (dsa_is_user_port(priv->ds, i))
+ priv->ports[i].mgmt_slot = k++;
+ }
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
+
+ /* This table only has a single entry */
+ ((struct sja1105_general_params_entry *)table->entries)[0] =
+ default_general_params;
+
+ return 0;
+}
+
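+/* The policer RATE field counts in steps of 1/64 Mbps (15.625 kbps),
+ * so e.g. SJA1105_RATE_MBPS(1000) yields 64000 for full line rate.
+ */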
+#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
+
+static inline void
+sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
+ int index)
+{
+ policing[index].sharindx = index;
+ policing[index].smax = 65535; /* Burst size in bytes */
+ policing[index].rate = SJA1105_RATE_MBPS(1000);
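+	/* 1514 byte frame + 4 byte VLAN tag + 4 byte FCS = 1522 bytes */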
+ policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+ policing[index].partition = 0;
+}
+
+static int sja1105_init_l2_policing(struct sja1105_private *priv)
+{
+ struct sja1105_l2_policing_entry *policing;
+ struct sja1105_table *table;
+ int i, j, k;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
+
+ /* Discard previous L2 Policing Table */
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
+
+ policing = table->entries;
+
+ /* k sweeps through all unicast policers (0-39).
+ * bcast sweeps through policers 40-44.
+ */
+ for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
+ int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
+
+ for (j = 0; j < SJA1105_NUM_TC; j++, k++)
+ sja1105_setup_policer(policing, k);
+
+ /* Set up this port's policer for broadcast traffic */
+ sja1105_setup_policer(policing, bcast);
+ }
+ return 0;
+}
+
+static int sja1105_static_config_load(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ int rc;
+
+ sja1105_static_config_free(&priv->static_config);
+ rc = sja1105_static_config_init(&priv->static_config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return rc;
+
+ /* Build static configuration */
+ rc = sja1105_init_mac_settings(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_mii_settings(priv, ports);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_fdb(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_static_vlan(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_lookup_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_forwarding_params(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_l2_policing(priv);
+ if (rc < 0)
+ return rc;
+ rc = sja1105_init_general_params(priv);
+ if (rc < 0)
+ return rc;
+
+ /* Send initial configuration to hardware via SPI */
+ return sja1105_static_config_upload(priv);
+}
+
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
+ const struct sja1105_dt_port *ports)
+{
+ int i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+		if (ports[i].role == XMII_MAC)
+			continue;
+
+		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+			priv->rgmii_rx_delay[i] = true;
+
+		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_tx_delay[i] = true;
+
+ if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
+ !priv->info->setup_rgmii_delay)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int sja1105_parse_ports_node(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports,
+ struct device_node *ports_node)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *child;
+
+ for_each_child_of_node(ports_node, child) {
+ struct device_node *phy_node;
+ int phy_mode;
+ u32 index;
+
+ /* Get switch port number from DT */
+ if (of_property_read_u32(child, "reg", &index) < 0) {
+ dev_err(dev, "Port number not defined in device tree "
+ "(property \"reg\")\n");
+ return -ENODEV;
+ }
+
+ /* Get PHY mode from DT */
+ phy_mode = of_get_phy_mode(child);
+ if (phy_mode < 0) {
+ dev_err(dev, "Failed to read phy-mode or "
+ "phy-interface-type property for port %d\n",
+ index);
+ return -ENODEV;
+ }
+ ports[index].phy_mode = phy_mode;
+
+ phy_node = of_parse_phandle(child, "phy-handle", 0);
+ if (!phy_node) {
+ if (!of_phy_is_fixed_link(child)) {
+ dev_err(dev, "phy-handle or fixed-link "
+ "properties missing!\n");
+ return -ENODEV;
+ }
+ /* phy-handle is missing, but fixed-link isn't.
+ * So it's a fixed link. Default to PHY role.
+ */
+ ports[index].role = XMII_PHY;
+ } else {
+ /* phy-handle present => put port in MAC role */
+ ports[index].role = XMII_MAC;
+ of_node_put(phy_node);
+ }
+
+ /* The MAC/PHY role can be overridden with explicit bindings */
+ if (of_property_read_bool(child, "sja1105,role-mac"))
+ ports[index].role = XMII_MAC;
+ else if (of_property_read_bool(child, "sja1105,role-phy"))
+ ports[index].role = XMII_PHY;
+ }
+
+ return 0;
+}
+
+static int sja1105_parse_dt(struct sja1105_private *priv,
+ struct sja1105_dt_port *ports)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct device_node *switch_node = dev->of_node;
+ struct device_node *ports_node;
+ int rc;
+
+ ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node) {
+ dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ return -ENODEV;
+ }
+
+ rc = sja1105_parse_ports_node(priv, ports, ports_node);
+ of_node_put(ports_node);
+
+ return rc;
+}
+
+/* Convert back and forth MAC speed from Mbps to SJA1105 encoding */
+static int sja1105_speed[] = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 10,
+ [SJA1105_SPEED_100MBPS] = 100,
+ [SJA1105_SPEED_1000MBPS] = 1000,
+};
+
+static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
+{
+ int i;
+
+ for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
+ if (sja1105_speed[i] == speed_mbps)
+ return i;
+ return -EINVAL;
+}
+
+/* Set link speed and enable/disable traffic I/O in the MAC configuration
+ * for a specific port.
+ *
+ * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
+ * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
+ * settings from the STP state, but not persistently (does not
+ * overwrite the static MAC info for this port).
+ */
+static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
+ int speed_mbps, bool enabled)
+{
+ struct sja1105_mac_config_entry dyn_mac;
+ struct sja1105_xmii_params_entry *mii;
+ struct sja1105_mac_config_entry *mac;
+ struct device *dev = priv->ds->dev;
+ sja1105_phy_interface_t phy_mode;
+ sja1105_speed_t speed;
+ int rc;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ speed = sja1105_get_speed_cfg(speed_mbps);
+ if (speed_mbps && speed < 0) {
+ dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
+ return -EINVAL;
+ }
+
+ /* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
+ * configuration table, since this will be used for the clocking setup,
+ * and we no longer need to store it in the static config (already told
+ * hardware we want auto during upload phase).
+ */
+ if (speed_mbps)
+ mac[port].speed = speed;
+ else
+ mac[port].speed = SJA1105_SPEED_AUTO;
+
+ /* On P/Q/R/S, one can read from the device via the MAC reconfiguration
+ * tables. On E/T, MAC reconfig tables are not readable, only writable.
+ * We have to *know* what the MAC looks like. For the sake of keeping
+ * the code common, we'll use the static configuration tables as a
+ * reasonable approximation for both E/T and P/Q/R/S.
+ */
+ dyn_mac = mac[port];
+ dyn_mac.ingress = enabled && mac[port].ingress;
+ dyn_mac.egress = enabled && mac[port].egress;
+
+ /* Write to the dynamic reconfiguration tables */
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
+ port, &dyn_mac, true);
+ if (rc < 0) {
+ dev_err(dev, "Failed to write MAC config: %d\n", rc);
+ return rc;
+ }
+
+ /* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
+ * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
+ * RMII no change of the clock setup is required. Actually, changing
+ * the clock setup does interrupt the clock signal for a certain time
+ * which causes trouble for all PHYs relying on this signal.
+ */
+ if (!enabled)
+ return 0;
+
+ phy_mode = mii->xmii_mode[port];
+ if (phy_mode != XMII_MODE_RGMII)
+ return 0;
+
+ return sja1105_clocking_setup_port(priv, port);
+}
+
+static void sja1105_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ if (!phydev->link)
+ sja1105_adjust_port_config(priv, port, 0, false);
+ else
+ sja1105_adjust_port_config(priv, port, phydev->speed, true);
+}
+
+static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ /* Construct a new mask which exhaustively contains all link features
+ * supported by the MAC, and then apply that (logical AND) to what will
+ * be sent to the PHY for "marketing".
+ */
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_xmii_params_entry *mii;
+
+ mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
+
+ /* The MAC does not support pause frames, and also doesn't
+ * support half-duplex traffic modes.
+ */
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, MII);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ if (mii->xmii_mode[port] == XMII_MODE_RGMII)
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+/* First-generation switches have a 4-way set associative TCAM that
+ * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
+ * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
+ * For the placement of a newly learnt FDB entry, the switch selects the bin
+ * based on a hash function, and the way within that bin incrementally.
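+ * With 4 ways per bin this yields 256 bins, e.g. (bin 5, way 2) maps to
+ * FDB index 5 * SJA1105ET_FDB_BIN_SIZE + 2 = 22.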
+ */
+static inline int sja1105et_fdb_index(int bin, int way)
+{
+ return bin * SJA1105ET_FDB_BIN_SIZE + way;
+}
+
+static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
+ const u8 *addr, u16 vid,
+ struct sja1105_l2_lookup_entry *match,
+ int *last_unused)
+{
+ int way;
+
+ for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ int index = sja1105et_fdb_index(bin, way);
+
+ /* Skip unused entries, optionally marking them
+ * into the return value
+ */
+ if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup)) {
+ if (last_unused)
+ *last_unused = way;
+ continue;
+ }
+
+ if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
+ l2_lookup.vlanid == vid) {
+ if (match)
+ *match = l2_lookup;
+ return way;
+ }
+ }
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_fdb_add(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int last_unused = -1;
+ int bin, way;
+
+ bin = sja1105_fdb_hash(priv, addr, vid);
+
+ way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, &last_unused);
+ if (way >= 0) {
+ /* We have an FDB entry. Is our port in the destination
+ * mask? If yes, we need to do nothing. If not, we need
+ * to rewrite the entry by adding this port to it.
+ */
+ if (l2_lookup.destports & BIT(port))
+ return 0;
+ l2_lookup.destports |= BIT(port);
+ } else {
+		int index;
+
+ /* We don't have an FDB entry. We construct a new one and
+ * try to find a place for it within the FDB table.
+ */
+ l2_lookup.macaddr = ether_addr_to_u64(addr);
+ l2_lookup.destports = BIT(port);
+ l2_lookup.vlanid = vid;
+
+ if (last_unused >= 0) {
+ way = last_unused;
+ } else {
+ /* Bin is full, need to evict somebody.
+ * Choose victim at random. If you get these messages
+ * often, you may need to consider changing the
+ * distribution function:
+ * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
+ */
+ get_random_bytes(&way, sizeof(u8));
+ way %= SJA1105ET_FDB_BIN_SIZE;
+ dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
+ bin, addr, way);
+			/* Evict the victim entry at the randomly chosen way */
+			index = sja1105et_fdb_index(bin, way);
+			sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+						     index, NULL, false);
+ }
+ }
+ l2_lookup.index = sja1105et_fdb_index(bin, way);
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup.index, &l2_lookup,
+ true);
+}
+
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid)
+{
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ struct sja1105_private *priv = ds->priv;
+ int index, bin, way;
+ bool keep;
+
+ bin = sja1105_fdb_hash(priv, addr, vid);
+ way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
+ &l2_lookup, NULL);
+ if (way < 0)
+ return 0;
+ index = sja1105et_fdb_index(bin, way);
+
+ /* We have an FDB entry. Is our port in the destination mask? If yes,
+ * we need to remove it. If the resulting port mask becomes empty, we
+ * need to completely evict the FDB entry.
+ * Otherwise we just write it back.
+ */
+ if (l2_lookup.destports & BIT(port))
+ l2_lookup.destports &= ~BIT(port);
+ if (l2_lookup.destports)
+ keep = true;
+ else
+ keep = false;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ index, &l2_lookup, keep);
+}
+
+static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
+ dsa_fdb_dump_cb_t *cb, void *data)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct device *dev = ds->dev;
+ int i;
+
+ for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
+ struct sja1105_l2_lookup_entry l2_lookup = {0};
+ u8 macaddr[ETH_ALEN];
+ int rc;
+
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
+ i, &l2_lookup);
+ /* No fdb entry at i, not an issue */
+ if (rc == -EINVAL)
+ continue;
+ if (rc) {
+ dev_err(dev, "Failed to dump FDB: %d\n", rc);
+ return rc;
+ }
+
+ /* FDB dump callback is per port. This means we have to
+ * disregard a valid entry if it's not for this port, even if
+ * only to revisit it later. This is inefficient because the
+ * 1024-sized FDB table needs to be traversed 4 times through
+ * SPI during a 'bridge fdb show' command.
+ */
+ if (!(l2_lookup.destports & BIT(port)))
+ continue;
+ u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ cb(macaddr, l2_lookup.vlanid, false, data);
+ }
+ return 0;
+}
+
+/* This callback needs to be present */
+static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ return 0;
+}
+
+static void sja1105_mdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
+}
+
+static int sja1105_mdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
+}
+
+static int sja1105_bridge_member(struct dsa_switch *ds, int port,
+ struct net_device *br, bool member)
+{
+ struct sja1105_l2_forwarding_entry *l2_fwd;
+ struct sja1105_private *priv = ds->priv;
+ int i, rc;
+
+ l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ /* Add this port to the forwarding matrix of the
+		 * other ports in the same bridge, and vice versa.
+ */
+ if (!dsa_is_user_port(ds, i))
+ continue;
+ /* For the ports already under the bridge, only one thing needs
+ * to be done, and that is to add this port to their
+ * reachability domain. So we can perform the SPI write for
+ * them immediately. However, for this port itself (the one
+ * that is new to the bridge), we need to add all other ports
+ * to its reachability domain. So we do that incrementally in
+ * this loop, and perform the SPI write only at the end, once
+ * the domain contains all other bridge ports.
+ */
+ if (i == port)
+ continue;
+ if (dsa_to_port(ds, i)->bridge_dev != br)
+ continue;
+ sja1105_port_allow_traffic(l2_fwd, i, port, member);
+ sja1105_port_allow_traffic(l2_fwd, port, i, member);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ i, &l2_fwd[i], true);
+ if (rc < 0)
+ return rc;
+ }
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
+ port, &l2_fwd[port], true);
+}
+
+static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ /* From UM10944 description of DRPDTAG (why put this there?):
+ * "Management traffic flows to the port regardless of the state
+		 * of the INGRESS flag". So BPDUs are still allowed to pass.
+ * At the moment no difference between DISABLED and BLOCKING.
+ */
+ mac[port].ingress = false;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LISTENING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = false;
+ break;
+ case BR_STATE_LEARNING:
+ mac[port].ingress = true;
+ mac[port].egress = false;
+ mac[port].dyn_learn = true;
+ break;
+ case BR_STATE_FORWARDING:
+ mac[port].ingress = true;
+ mac[port].egress = true;
+ mac[port].dyn_learn = true;
+ break;
+ default:
+ dev_err(ds->dev, "invalid STP state: %d\n", state);
+ return;
+ }
+
+ sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ return sja1105_bridge_member(ds, port, br, true);
+}
+
+static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
+{
+ sja1105_bridge_member(ds, port, br, false);
+}
+
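+/* Reconstruct the bridge port STP state from the ingress/egress/dyn_learn
+ * bits that sja1105_bridge_stp_state_set() programmed into the static
+ * MAC config.
+ */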
+static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
+ return BR_STATE_BLOCKING;
+ if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
+ return BR_STATE_LISTENING;
+ if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
+ return BR_STATE_LEARNING;
+ if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
+ return BR_STATE_FORWARDING;
+ return -EINVAL;
+}
+
+/* For situations where we need to change a setting at runtime that is only
+ * available through the static configuration, resetting the switch in order
+ * to upload the new static config is unavoidable. Back up the settings we
+ * modify at runtime (currently only MAC) and restore them after uploading,
+ * such that this operation is relatively seamless.
+ */
+static int sja1105_static_config_reload(struct sja1105_private *priv)
+{
+ struct sja1105_mac_config_entry *mac;
+ int speed_mbps[SJA1105_NUM_PORTS];
+ u8 stp_state[SJA1105_NUM_PORTS];
+ int rc, i;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ /* Back up settings changed by sja1105_adjust_port_config and
+ * sja1105_bridge_stp_state_set and restore their defaults.
+ */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ speed_mbps[i] = sja1105_speed[mac[i].speed];
+ mac[i].speed = SJA1105_SPEED_AUTO;
+ if (i == dsa_upstream_port(priv->ds, i)) {
+ mac[i].ingress = true;
+ mac[i].egress = true;
+ mac[i].dyn_learn = true;
+ } else {
+ stp_state[i] = sja1105_stp_state_get(priv, i);
+ mac[i].ingress = false;
+ mac[i].egress = false;
+ mac[i].dyn_learn = false;
+ }
+ }
+
+ /* Reset switch and send updated static configuration */
+ rc = sja1105_static_config_upload(priv);
+ if (rc < 0)
+ goto out;
+
+ /* Configure the CGU (PLLs) for MII and RMII PHYs.
+ * For these interfaces there is no dynamic configuration
+ * needed, since PLLs have same settings at all speeds.
+ */
+ rc = sja1105_clocking_setup(priv);
+ if (rc < 0)
+ goto out;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ bool enabled = (speed_mbps[i] != 0);
+
+ if (i != dsa_upstream_port(priv->ds, i))
+ sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);
+
+ rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
+ enabled);
+ if (rc < 0)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+/* The TPID setting belongs to the General Parameters table,
+ * which can only be partially reconfigured at runtime (and not the TPID).
+ * So a switch reset is required.
+ */
+static int sja1105_change_tpid(struct sja1105_private *priv,
+ u16 tpid, u16 tpid2)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+ general_params->tpid = tpid;
+ general_params->tpid2 = tpid2;
+ return sja1105_static_config_reload(priv);
+}
+
+static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
+{
+ struct sja1105_mac_config_entry *mac;
+
+ mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
+
+ mac[port].vlanid = pvid;
+
+ return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
+ &mac[port], true);
+}
+
+static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ int count, i;
+
+ vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
+ count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
+
+ for (i = 0; i < count; i++)
+ if (vlan[i].vlanid == vid)
+ return i;
+
+ /* Return an invalid entry index if not found */
+ return -1;
+}
+
+static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
+ bool enabled, bool untagged)
+{
+ struct sja1105_vlan_lookup_entry *vlan;
+ struct sja1105_table *table;
+ bool keep = true;
+ int match, rc;
+
+ table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
+
+ match = sja1105_is_vlan_configured(priv, vid);
+ if (match < 0) {
+ /* Can't delete a missing entry. */
+ if (!enabled)
+ return 0;
+ rc = sja1105_table_resize(table, table->entry_count + 1);
+ if (rc)
+ return rc;
+ match = table->entry_count - 1;
+ }
+ /* Assign pointer after the resize (it's new memory) */
+ vlan = table->entries;
+ vlan[match].vlanid = vid;
+ if (enabled) {
+ vlan[match].vlan_bc |= BIT(port);
+ vlan[match].vmemb_port |= BIT(port);
+ } else {
+ vlan[match].vlan_bc &= ~BIT(port);
+ vlan[match].vmemb_port &= ~BIT(port);
+ }
+ /* Also unset tag_port if removing this VLAN was requested,
+ * just so we don't have a confusing bitmap (no practical purpose).
+ */
+ if (untagged || !enabled)
+ vlan[match].tag_port &= ~BIT(port);
+ else
+ vlan[match].tag_port |= BIT(port);
+ /* If there's no port left as member of this VLAN,
+ * it's time for it to go.
+ */
+ if (!vlan[match].vmemb_port)
+ keep = false;
+
+ dev_dbg(priv->ds->dev,
+ "%s: port %d, vid %llu, broadcast domain 0x%llx, "
+ "port members 0x%llx, tagged ports 0x%llx, keep %d\n",
+ __func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
+ vlan[match].vmemb_port, vlan[match].tag_port, keep);
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
+ &vlan[match], keep);
+ if (rc < 0)
+ return rc;
+
+ if (!keep)
+ return sja1105_table_delete_entry(table, match);
+
+ return 0;
+}
+
+static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
+{
+ int rc, i;
+
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
+ i, rc);
+ return rc;
+ }
+ }
+ dev_info(ds->dev, "%s switch tagging\n",
+ enabled ? "Enabled" : "Disabled");
+ return 0;
+}
+
+static enum dsa_tag_protocol
+sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
+{
+ return DSA_TAG_PROTO_SJA1105;
+}
+
+/* This callback needs to be present */
+static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ return 0;
+}
+
+static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ if (enabled)
+ /* Enable VLAN filtering. */
+ rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
+ else
+ /* Disable VLAN filtering. */
+ rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
+ if (rc)
+ dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
+
+	/* Switch port identification based on 802.1Q is only possible
+ * if we are not under a vlan_filtering bridge. So make sure
+ * the two configurations are mutually exclusive.
+ */
+ return sja1105_setup_8021q_tagging(ds, !enabled);
+}
+
+static void sja1105_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+ int rc;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
+ vid, port, rc);
+ return;
+ }
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ rc = sja1105_pvid_apply(ds->priv, port, vid);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
+ vid, port, rc);
+ return;
+ }
+ }
+ }
+}
+
+static int sja1105_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct sja1105_private *priv = ds->priv;
+ u16 vid;
+ int rc;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
+ BRIDGE_VLAN_INFO_UNTAGGED);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
+ vid, port, rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/* The programming model for the SJA1105 switch is "all-at-once" via static
+ * configuration tables. Some of these can be dynamically modified at runtime,
+ * but not the xMII mode parameters table.
+ * Furthermore, some PHYs may not have crystals for generating their clocks
+ * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
+ * ref_clk pin. So port clocking needs to be initialized early, before
+ * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
+ * Setting correct PHY link speed does not matter now.
+ * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
+ * bindings are not yet parsed by DSA core. We need to parse early so that we
+ * can populate the xMII mode parameters table.
+ */
+static int sja1105_setup(struct dsa_switch *ds)
+{
+ struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = sja1105_parse_dt(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
+ return rc;
+ }
+
+ /* Error out early if internal delays are required through DT
+ * and we can't apply them.
+ */
+ rc = sja1105_parse_rgmii_delays(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "RGMII delay not supported\n");
+ return rc;
+ }
+
+ /* Create and send configuration down to device */
+ rc = sja1105_static_config_load(priv, ports);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to load static config: %d\n", rc);
+ return rc;
+ }
+ /* Configure the CGU (PHY link modes and speeds) */
+ rc = sja1105_clocking_setup(priv);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
+ return rc;
+ }
+ /* On SJA1105, VLAN filtering per se is always enabled in hardware.
+ * The only thing we can do to disable it is lie about what the 802.1Q
+ * EtherType is.
+ * So it will still try to apply VLAN filtering, but all ingress
+ * traffic (except frames received with EtherType of ETH_P_SJA1105)
+ * will be internally tagged with a distorted VLAN header where the
+ * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
+ */
+ ds->vlan_filtering_is_global = true;
+
+ /* The DSA/switchdev model brings up switch ports in standalone mode by
+ * default, and that means vlan_filtering is 0 since they're not under
+ * a bridge, so it's safe to set up switch tagging at this time.
+ */
+ return sja1105_setup_8021q_tagging(ds, true);
+}
+
+static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
+ struct sk_buff *skb)
+{
+ struct sja1105_mgmt_entry mgmt_route = {0};
+ struct sja1105_private *priv = ds->priv;
+ struct ethhdr *hdr;
+ int timeout = 10;
+ int rc;
+
+ hdr = eth_hdr(skb);
+
+ mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
+ mgmt_route.destports = BIT(port);
+ mgmt_route.enfport = 1;
+
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, true);
+ if (rc < 0) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ /* Transfer skb to the host port. */
+ dsa_enqueue_skb(skb, ds->ports[port].slave);
+
+ /* Wait until the switch has processed the frame */
+ do {
+ rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route);
+ if (rc < 0) {
+ dev_err_ratelimited(priv->ds->dev,
+ "failed to poll for mgmt route\n");
+ continue;
+ }
+
+ /* UM10944: The ENFPORT flag of the respective entry is
+ * cleared when a match is found. The host can use this
+ * flag as an acknowledgment.
+ */
+ cpu_relax();
+ } while (mgmt_route.enfport && --timeout);
+
+ if (!timeout) {
+ /* Clean up the management route so that a follow-up
+ * frame may not match on it by mistake.
+ */
+ sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
+ slot, &mgmt_route, false);
+ dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
+ }
+
+ return NETDEV_TX_OK;
+}
+
+/* Deferred work is unfortunately necessary because setting up the management
+ * route cannot be done from atomic context (SPI transfer takes a sleepable
+ * lock on the bus)
+ */
+static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
+ struct sk_buff *skb)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_port *sp = &priv->ports[port];
+ int slot = sp->mgmt_slot;
+
+ /* The tragic fact about the switch having 4x2 slots for installing
+ * management routes is that all of them except one are actually
+ * useless.
+ * If 2 slots are simultaneously configured for two BPDUs sent to the
+ * same (multicast) DMAC but on different egress ports, the switch
+ * would confuse them and redirect the first frame it receives on the
+ * CPU port towards the port configured in the numerically first slot
+ * (therefore the wrong port), then the second received frame towards
+ * the second slot (also the wrong port).
+ * So for all practical purposes, there needs to be a lock that
+ * prevents that from happening. The slot used here is utterly useless
+ * (it could just as well have been 0), but we are doing it
+ * nonetheless, in case a smarter idea ever comes up in the future.
+ */
+ mutex_lock(&priv->mgmt_lock);
+
+ sja1105_mgmt_xmit(ds, port, slot, skb);
+
+ mutex_unlock(&priv->mgmt_lock);
+ return NETDEV_TX_OK;
+}
+
+/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
+ * which cannot be reconfigured at runtime. So a switch reset is required.
+ */
+static int sja1105_set_ageing_time(struct dsa_switch *ds,
+ unsigned int ageing_time)
+{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_table *table;
+ unsigned int maxage;
+
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+
+ maxage = SJA1105_AGEING_TIME_MS(ageing_time);
+
+ if (l2_lookup_params->maxage == maxage)
+ return 0;
+
+ l2_lookup_params->maxage = maxage;
+
+ return sja1105_static_config_reload(priv);
+}
+
+static const struct dsa_switch_ops sja1105_switch_ops = {
+ .get_tag_protocol = sja1105_get_tag_protocol,
+ .setup = sja1105_setup,
+ .adjust_link = sja1105_adjust_link,
+ .set_ageing_time = sja1105_set_ageing_time,
+ .phylink_validate = sja1105_phylink_validate,
+ .get_strings = sja1105_get_strings,
+ .get_ethtool_stats = sja1105_get_ethtool_stats,
+ .get_sset_count = sja1105_get_sset_count,
+ .port_fdb_dump = sja1105_fdb_dump,
+ .port_fdb_add = sja1105_fdb_add,
+ .port_fdb_del = sja1105_fdb_del,
+ .port_bridge_join = sja1105_bridge_join,
+ .port_bridge_leave = sja1105_bridge_leave,
+ .port_stp_state_set = sja1105_bridge_stp_state_set,
+ .port_vlan_prepare = sja1105_vlan_prepare,
+ .port_vlan_filtering = sja1105_vlan_filtering,
+ .port_vlan_add = sja1105_vlan_add,
+ .port_vlan_del = sja1105_vlan_del,
+ .port_mdb_prepare = sja1105_mdb_prepare,
+ .port_mdb_add = sja1105_mdb_add,
+ .port_mdb_del = sja1105_mdb_del,
+ .port_deferred_xmit = sja1105_port_deferred_xmit,
+};
+
+static int sja1105_check_device_id(struct sja1105_private *priv)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
+ struct device *dev = &priv->spidev->dev;
+ u64 device_id;
+ u64 part_no;
+ int rc;
+
+ rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
+ &device_id, SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ if (device_id != priv->info->device_id) {
+ dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
+ priv->info->device_id, device_id);
+ return -ENODEV;
+ }
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
+ prod_id, SJA1105_SIZE_DEVICE_ID);
+ if (rc < 0)
+ return rc;
+
+ sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
+
+ if (part_no != priv->info->part_no) {
+ dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
+ priv->info->part_no, part_no);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int sja1105_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct sja1105_private *priv;
+ struct dsa_switch *ds;
+ int rc, i;
+
+ if (!dev->of_node) {
+ dev_err(dev, "No DTS bindings for SJA1105 driver\n");
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Configure the optional reset pin and bring up switch */
+ priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ dev_dbg(dev, "reset-gpios not defined, ignoring\n");
+ else
+ sja1105_hw_reset(priv->reset_gpio, 1, 1);
+
+ /* Populate our driver private structure (priv) based on
+ * the device tree node that was probed (spi)
+ */
+ priv->spidev = spi;
+ spi_set_drvdata(spi, priv);
+
+ /* Configure the SPI bus */
+ spi->bits_per_word = 8;
+ rc = spi_setup(spi);
+ if (rc < 0) {
+ dev_err(dev, "Could not init SPI\n");
+ return rc;
+ }
+
+ priv->info = of_device_get_match_data(dev);
+
+ /* Detect hardware device */
+ rc = sja1105_check_device_id(priv);
+ if (rc < 0) {
+ dev_err(dev, "Device ID check failed: %d\n", rc);
+ return rc;
+ }
+
+ dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
+
+ ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
+ if (!ds)
+ return -ENOMEM;
+
+ ds->ops = &sja1105_switch_ops;
+ ds->priv = priv;
+ priv->ds = ds;
+
+ /* Connections between dsa_port and sja1105_port */
+ for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ struct sja1105_port *sp = &priv->ports[i];
+
+ ds->ports[i].priv = sp;
+ sp->dp = &ds->ports[i];
+ }
+ mutex_init(&priv->mgmt_lock);
+
+ return dsa_register_switch(priv->ds);
+}
+
+static int sja1105_remove(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ dsa_unregister_switch(priv->ds);
+ sja1105_static_config_free(&priv->static_config);
+ return 0;
+}
+
+static const struct of_device_id sja1105_dt_ids[] = {
+ { .compatible = "nxp,sja1105e", .data = &sja1105e_info },
+ { .compatible = "nxp,sja1105t", .data = &sja1105t_info },
+ { .compatible = "nxp,sja1105p", .data = &sja1105p_info },
+ { .compatible = "nxp,sja1105q", .data = &sja1105q_info },
+ { .compatible = "nxp,sja1105r", .data = &sja1105r_info },
+ { .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+
+static struct spi_driver sja1105_driver = {
+ .driver = {
+ .name = "sja1105",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sja1105_dt_ids),
+ },
+ .probe = sja1105_probe,
+ .remove = sja1105_remove,
+};
+
+module_spi_driver(sja1105_driver);
+
+MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
+MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
+MODULE_DESCRIPTION("SJA1105 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
new file mode 100644
index 000000000000..244a94ccfc18
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include <linux/spi/spi.h>
+#include <linux/packing.h>
+#include "sja1105.h"
+
+#define SJA1105_SIZE_PORT_CTRL 4
+#define SJA1105_SIZE_RESET_CMD 4
+#define SJA1105_SIZE_SPI_MSG_HEADER 4
+#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
+#define SJA1105_SIZE_SPI_TRANSFER_MAX \
+ (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN)
+
+static int sja1105_spi_transfer(const struct sja1105_private *priv,
+ const void *tx, void *rx, int size)
+{
+ struct spi_device *spi = priv->spidev;
+ struct spi_transfer transfer = {
+ .tx_buf = tx,
+ .rx_buf = rx,
+ .len = size,
+ };
+ struct spi_message msg;
+ int rc;
+
+ if (size > SJA1105_SIZE_SPI_TRANSFER_MAX) {
+ dev_err(&spi->dev, "SPI message (%d) longer than max of %d\n",
+ size, SJA1105_SIZE_SPI_TRANSFER_MAX);
+ return -EMSGSIZE;
+ }
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&transfer, &msg);
+
+ rc = spi_sync(spi, &msg);
+ if (rc < 0) {
+ dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void
+sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
+{
+ const int size = SJA1105_SIZE_SPI_MSG_HEADER;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &msg->access, 31, 31, size);
+ sja1105_pack(buf, &msg->read_count, 30, 25, size);
+ sja1105_pack(buf, &msg->address, 24, 4, size);
+}
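+
+/* Illustrative sketch only (not used by the driver): a request to read one
+ * 32-bit word from the General Status area at absolute address regs->status
+ * would be described as
+ *
+ *     struct sja1105_spi_message msg = {
+ *             .access     = SPI_READ,
+ *             .read_count = 1,
+ *             .address    = regs->status,
+ *     };
+ *
+ * which the helper above packs into a 4-byte control word with the access
+ * type in bit 31, the read count in bits 30:25 and the register address in
+ * bits 24:4.
+ */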
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking size_bytes from *packed_buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing size_bytes into *packed_buf
+ *
+ * This function should only be called if it is known beforehand that
+ * @size_bytes is smaller than SJA1105_SIZE_SPI_MSG_MAXLEN. Larger packed
+ * buffers are chunked into smaller pieces by sja1105_spi_send_long_packed_buf
+ * below.
+ */
+int sja1105_spi_send_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ void *packed_buf, size_t size_bytes)
+{
+ u8 tx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
+ u8 rx_buf[SJA1105_SIZE_SPI_TRANSFER_MAX] = {0};
+ const int msg_len = size_bytes + SJA1105_SIZE_SPI_MSG_HEADER;
+ struct sja1105_spi_message msg = {0};
+ int rc;
+
+ if (msg_len > SJA1105_SIZE_SPI_TRANSFER_MAX)
+ return -ERANGE;
+
+ msg.access = rw;
+ msg.address = reg_addr;
+ if (rw == SPI_READ)
+ msg.read_count = size_bytes / 4;
+
+ sja1105_spi_message_pack(tx_buf, &msg);
+
+ if (rw == SPI_WRITE)
+ memcpy(tx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
+ packed_buf, size_bytes);
+
+ rc = sja1105_spi_transfer(priv, tx_buf, rx_buf, msg_len);
+ if (rc < 0)
+ return rc;
+
+ if (rw == SPI_READ)
+ memcpy(packed_buf, rx_buf + SJA1105_SIZE_SPI_MSG_HEADER,
+ size_bytes);
+
+ return 0;
+}
+
+/* If @rw is:
+ * - SPI_WRITE: creates and sends an SPI write message at absolute
+ * address reg_addr, taking size_bytes from *packed_buf
+ * - SPI_READ: creates and sends an SPI read message from absolute
+ * address reg_addr, writing size_bytes into *packed_buf
+ *
+ * The u64 *value is unpacked, meaning that it's stored in the native
+ * CPU endianness and directly usable by software running on the core.
+ *
+ * This is a wrapper around sja1105_spi_send_packed_buf().
+ */
+int sja1105_spi_send_int(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 reg_addr,
+ u64 *value, u64 size_bytes)
+{
+ u8 packed_buf[SJA1105_SIZE_SPI_MSG_MAXLEN];
+ int rc;
+
+ if (size_bytes > SJA1105_SIZE_SPI_MSG_MAXLEN)
+ return -ERANGE;
+
+ if (rw == SPI_WRITE)
+ sja1105_pack(packed_buf, value, 8 * size_bytes - 1, 0,
+ size_bytes);
+
+ rc = sja1105_spi_send_packed_buf(priv, rw, reg_addr, packed_buf,
+ size_bytes);
+
+ if (rw == SPI_READ)
+ sja1105_unpack(packed_buf, value, 8 * size_bytes - 1, 0,
+ size_bytes);
+
+ return rc;
+}
+
+/* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
+ * must be sent/received. Splitting the buffer into chunks and assembling
+ * those into SPI messages is done automatically by this function.
+ */
+int sja1105_spi_send_long_packed_buf(const struct sja1105_private *priv,
+ sja1105_spi_rw_mode_t rw, u64 base_addr,
+ void *packed_buf, u64 buf_len)
+{
+ struct chunk {
+ void *buf_ptr;
+ int len;
+ u64 spi_address;
+ } chunk;
+ int distance_to_end;
+ int rc;
+
+ /* Initialize chunk */
+ chunk.buf_ptr = packed_buf;
+ chunk.spi_address = base_addr;
+ chunk.len = min_t(int, buf_len, SJA1105_SIZE_SPI_MSG_MAXLEN);
+
+ while (chunk.len) {
+ rc = sja1105_spi_send_packed_buf(priv, rw, chunk.spi_address,
+ chunk.buf_ptr, chunk.len);
+ if (rc < 0)
+ return rc;
+
+ chunk.buf_ptr += chunk.len;
+ chunk.spi_address += chunk.len / 4;
+ distance_to_end = (uintptr_t)(packed_buf + buf_len -
+ chunk.buf_ptr);
+ chunk.len = min(distance_to_end, SJA1105_SIZE_SPI_MSG_MAXLEN);
+ }
+
+ return 0;
+}
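+
+/* Worked example (for illustration only): with SJA1105_SIZE_SPI_MSG_MAXLEN
+ * equal to 256 bytes, a 600-byte buffer at base_addr is transferred by the
+ * loop above as three messages:
+ *
+ *     chunk 1: 256 bytes at SPI address base_addr
+ *     chunk 2: 256 bytes at SPI address base_addr + 64
+ *     chunk 3:  88 bytes at SPI address base_addr + 128
+ *
+ * The SPI address advances by len / 4 because the switch address space is
+ * organized in 32-bit words, not bytes.
+ */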
+
+/* Back-ported structure from UM11040 Table 112.
+ * Reset control register (addr. 100440h)
+ * In the SJA1105 E/T, only warm_rst and cold_rst are
+ * supported (exposed in UM10944 as rst_ctrl), but the bit
+ * offsets of warm_rst and cold_rst are actually reversed.
+ */
+struct sja1105_reset_cmd {
+ u64 switch_rst;
+ u64 cfg_rst;
+ u64 car_rst;
+ u64 otp_rst;
+ u64 warm_rst;
+ u64 cold_rst;
+ u64 por_rst;
+};
+
+static void
+sja1105et_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
+{
+ const int size = SJA1105_SIZE_RESET_CMD;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &reset->cold_rst, 3, 3, size);
+ sja1105_pack(buf, &reset->warm_rst, 2, 2, size);
+}
+
+static void
+sja1105pqrs_reset_cmd_pack(void *buf, const struct sja1105_reset_cmd *reset)
+{
+ const int size = SJA1105_SIZE_RESET_CMD;
+
+ memset(buf, 0, size);
+
+ sja1105_pack(buf, &reset->switch_rst, 8, 8, size);
+ sja1105_pack(buf, &reset->cfg_rst, 7, 7, size);
+ sja1105_pack(buf, &reset->car_rst, 5, 5, size);
+ sja1105_pack(buf, &reset->otp_rst, 4, 4, size);
+ sja1105_pack(buf, &reset->warm_rst, 3, 3, size);
+ sja1105_pack(buf, &reset->cold_rst, 2, 2, size);
+ sja1105_pack(buf, &reset->por_rst, 1, 1, size);
+}
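+
+/* Note (informational): comparing the two packing helpers above illustrates
+ * the reversal mentioned in the struct comment: a cold reset sets bit 3 of
+ * the RGU word on E/T but bit 2 on P/Q/R/S, while a warm reset sets bit 2 on
+ * E/T and bit 3 on P/Q/R/S.
+ */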
+
+static int sja1105et_reset_cmd(const void *ctx, const void *data)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_reset_cmd *reset = data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD];
+
+ if (reset->switch_rst ||
+ reset->cfg_rst ||
+ reset->car_rst ||
+ reset->otp_rst ||
+ reset->por_rst) {
+ dev_err(dev, "Only warm and cold reset is supported "
+ "for SJA1105 E/T!\n");
+ return -EINVAL;
+ }
+
+ if (reset->warm_rst)
+ dev_dbg(dev, "Warm reset requested\n");
+ if (reset->cold_rst)
+ dev_dbg(dev, "Cold reset requested\n");
+
+ sja1105et_reset_cmd_pack(packed_buf, reset);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
+ packed_buf, SJA1105_SIZE_RESET_CMD);
+}
+
+static int sja1105pqrs_reset_cmd(const void *ctx, const void *data)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_reset_cmd *reset = data;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = priv->ds->dev;
+ u8 packed_buf[SJA1105_SIZE_RESET_CMD];
+
+ if (reset->switch_rst)
+ dev_dbg(dev, "Main reset for all functional modules requested\n");
+ if (reset->cfg_rst)
+ dev_dbg(dev, "Chip configuration reset requested\n");
+ if (reset->car_rst)
+ dev_dbg(dev, "Clock and reset control logic reset requested\n");
+ if (reset->otp_rst)
+ dev_dbg(dev, "OTP read cycle for reading product "
+ "config settings requested\n");
+ if (reset->warm_rst)
+ dev_dbg(dev, "Warm reset requested\n");
+ if (reset->cold_rst)
+ dev_dbg(dev, "Cold reset requested\n");
+ if (reset->por_rst)
+ dev_dbg(dev, "Power-on reset requested\n");
+
+ sja1105pqrs_reset_cmd_pack(packed_buf, reset);
+
+ return sja1105_spi_send_packed_buf(priv, SPI_WRITE, regs->rgu,
+ packed_buf, SJA1105_SIZE_RESET_CMD);
+}
+
+static int sja1105_cold_reset(const struct sja1105_private *priv)
+{
+ struct sja1105_reset_cmd reset = {0};
+
+ reset.cold_rst = 1;
+ return priv->info->reset_cmd(priv, &reset);
+}
+
+static int sja1105_inhibit_tx(const struct sja1105_private *priv,
+ const unsigned long *port_bitmap)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u64 inhibit_cmd;
+ int port, rc;
+
+ rc = sja1105_spi_send_int(priv, SPI_READ, regs->port_control,
+ &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+ if (rc < 0)
+ return rc;
+
+ for_each_set_bit(port, port_bitmap, SJA1105_NUM_PORTS)
+ inhibit_cmd |= BIT(port);
+
+ return sja1105_spi_send_int(priv, SPI_WRITE, regs->port_control,
+ &inhibit_cmd, SJA1105_SIZE_PORT_CTRL);
+}
+
+struct sja1105_status {
+ u64 configs;
+ u64 crcchkl;
+ u64 ids;
+ u64 crcchkg;
+};
+
+/* This is not reading the entire General Status area, which is also
+ * divergent between E/T and P/Q/R/S, but only the relevant bits for
+ * ensuring that the static config upload procedure was successful.
+ */
+static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
+{
+ /* u32 pointer so that adding 1 advances by 4 bytes */
+ u32 *p = buf;
+
+ /* device_id is missing from the buffer, but we don't
+ * want to diverge from the manual definition of the
+ * register addresses, so we'll back off one step with
+ * the register pointer, and never access p[0].
+ */
+ p--;
+ sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
+ sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
+ sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
+}
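+
+/* Worked example of the index math above (informational): the 4-byte buffer
+ * is read starting at regs->status, which corresponds to manual address 0x1,
+ * while the manual numbers the General Status area starting from device_id
+ * at address 0x0. After the p-- adjustment, p + 0x1 points back at the start
+ * of the buffer, so the indices used above coincide with the manual's
+ * register addresses.
+ */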
+
+static int sja1105_status_get(struct sja1105_private *priv,
+ struct sja1105_status *status)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+ u8 packed_buf[4];
+ int rc;
+
+ rc = sja1105_spi_send_packed_buf(priv, SPI_READ,
+ regs->status,
+ packed_buf, 4);
+ if (rc < 0)
+ return rc;
+
+ sja1105_status_unpack(packed_buf, status);
+
+ return 0;
+}
+
+/* Not const because unpacking priv->static_config into buffers and preparing
+ * for upload requires recalculating the table CRCs and updating the
+ * in-memory structures with them.
+ */
+static int
+static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
+{
+ struct sja1105_static_config *config = &priv->static_config;
+ struct sja1105_table_header final_header;
+ sja1105_config_valid_t valid;
+ char *final_header_ptr;
+ int crc_len;
+
+ valid = sja1105_static_config_check_valid(config);
+ if (valid != SJA1105_CONFIG_OK) {
+ dev_err(&priv->spidev->dev,
+ sja1105_static_config_error_msg[valid]);
+ return -EINVAL;
+ }
+
+ /* Write Device ID and config tables to config_buf */
+ sja1105_static_config_pack(config_buf, config);
+ /* Recalculate CRC of the last header (right now 0xDEADBEEF).
+ * Don't include the CRC field itself.
+ */
+ crc_len = buf_len - 4;
+ /* Read the whole table header */
+ final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
+ sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
+ /* Modify */
+ final_header.crc = sja1105_crc32(config_buf, crc_len);
+ /* Rewrite */
+ sja1105_table_header_packing(final_header_ptr, &final_header, PACK);
+
+ return 0;
+}
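+
+/* Sketch of the buffer produced above, for reference: the packed stream is
+ * the device id, then one (12-byte header, entries, 4-byte CRC) group per
+ * non-empty table, and finally an end-of-config header whose last 4 bytes
+ * hold the global CRC. That is why crc_len is buf_len - 4: the global CRC
+ * covers everything except its own placeholder, which starts out as
+ * 0xDEADBEEF and is overwritten here.
+ */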
+
+#define RETRIES 10
+
+int sja1105_static_config_upload(struct sja1105_private *priv)
+{
+ unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
+ struct sja1105_static_config *config = &priv->static_config;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device *dev = &priv->spidev->dev;
+ struct sja1105_status status = {0};
+ int rc, retries = RETRIES;
+ u8 *config_buf;
+ int buf_len;
+
+ buf_len = sja1105_static_config_get_length(config);
+ config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
+ if (!config_buf)
+ return -ENOMEM;
+
+ rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Invalid config, cannot upload\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ /* Prevent PHY jabbering during switch reset by inhibiting
+ * Tx on all ports and waiting for current packet to drain.
+ * Otherwise, the PHY will see an unterminated Ethernet packet.
+ */
+ rc = sja1105_inhibit_tx(priv, &port_bitmap);
+ if (rc < 0) {
+ dev_err(dev, "Failed to inhibit Tx on ports\n");
+ rc = -ENXIO;
+ goto out;
+ }
+ /* Wait for any in-flight egress packet to finish transmission
+ * (reach the IFG). It is guaranteed that a second one will not
+ * follow, so a cold reset of the switch is safe at this point.
+ */
+ usleep_range(500, 1000);
+ do {
+ /* Put the SJA1105 in programming mode */
+ rc = sja1105_cold_reset(priv);
+ if (rc < 0) {
+ dev_err(dev, "Failed to reset switch, retrying...\n");
+ continue;
+ }
+ /* Wait for the switch to come out of reset */
+ usleep_range(1000, 5000);
+ /* Upload the static config to the device */
+ rc = sja1105_spi_send_long_packed_buf(priv, SPI_WRITE,
+ regs->config,
+ config_buf, buf_len);
+ if (rc < 0) {
+ dev_err(dev, "Failed to upload config, retrying...\n");
+ continue;
+ }
+ /* Check that SJA1105 responded well to the config upload */
+ rc = sja1105_status_get(priv, &status);
+ if (rc < 0)
+ continue;
+
+ if (status.ids == 1) {
+ dev_err(dev, "Mismatch between hardware and static config "
+ "device id. Wrote 0x%llx, wants 0x%llx\n",
+ config->device_id, priv->info->device_id);
+ continue;
+ }
+ if (status.crcchkl == 1) {
+ dev_err(dev, "Switch reported invalid local CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.crcchkg == 1) {
+ dev_err(dev, "Switch reported invalid global CRC on "
+ "the uploaded config, retrying...\n");
+ continue;
+ }
+ if (status.configs == 0) {
+ dev_err(dev, "Switch reported that configuration is "
+ "invalid, retrying...\n");
+ continue;
+ }
+ } while (--retries && (status.crcchkl == 1 || status.crcchkg == 1 ||
+ status.configs == 0 || status.ids == 1));
+
+ if (!retries) {
+ rc = -EIO;
+ dev_err(dev, "Failed to upload config to device, giving up\n");
+ goto out;
+ } else if (retries != RETRIES - 1) {
+ dev_info(dev, "Succeeded after %d tried\n", RETRIES - retries);
+ }
+
+ dev_info(dev, "Reset switch and programmed static config\n");
+out:
+ kfree(config_buf);
+ return rc;
+}
+
+struct sja1105_regs sja1105et_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x11,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM10944.pdf, Table 78, CGU Register overview */
+ .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
+ .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
+ .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+ .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
+ .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
+ .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
+ .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
+};
+
+struct sja1105_regs sja1105pqrs_regs = {
+ .device_id = 0x0,
+ .prod_id = 0x100BC3,
+ .status = 0x1,
+ .port_control = 0x12,
+ .config = 0x020000,
+ .rgu = 0x100440,
+ .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .rmii_pll1 = 0x10000A,
+ .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
+ /* UM10944.pdf, Table 86, ACU Register overview */
+ .rgmii_pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
+ .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
+ .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
+ .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
+ /* UM11040.pdf, Table 114 */
+ .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
+ .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
+ .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
+ .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
+ .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
+ .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
+ .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
+};
+
+struct sja1105_info sja1105e_info = {
+ .device_id = SJA1105E_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105e_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .reset_cmd = sja1105et_reset_cmd,
+ .regs = &sja1105et_regs,
+ .name = "SJA1105E",
+};
+struct sja1105_info sja1105t_info = {
+ .device_id = SJA1105T_DEVICE_ID,
+ .part_no = SJA1105ET_PART_NO,
+ .static_ops = sja1105t_table_ops,
+ .dyn_ops = sja1105et_dyn_ops,
+ .reset_cmd = sja1105et_reset_cmd,
+ .regs = &sja1105et_regs,
+ .name = "SJA1105T",
+};
+struct sja1105_info sja1105p_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105P_PART_NO,
+ .static_ops = sja1105p_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105P",
+};
+struct sja1105_info sja1105q_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105Q_PART_NO,
+ .static_ops = sja1105q_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105Q",
+};
+struct sja1105_info sja1105r_info = {
+ .device_id = SJA1105PR_DEVICE_ID,
+ .part_no = SJA1105R_PART_NO,
+ .static_ops = sja1105r_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .regs = &sja1105pqrs_regs,
+ .name = "SJA1105R",
+};
+struct sja1105_info sja1105s_info = {
+ .device_id = SJA1105QS_DEVICE_ID,
+ .part_no = SJA1105S_PART_NO,
+ .static_ops = sja1105s_table_ops,
+ .dyn_ops = sja1105pqrs_dyn_ops,
+ .regs = &sja1105pqrs_regs,
+ .reset_cmd = sja1105pqrs_reset_cmd,
+ .name = "SJA1105S",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
new file mode 100644
index 000000000000..b3c992b0abb0
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -0,0 +1,987 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/* Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#include "sja1105_static_config.h"
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+
+/* Convenience wrappers over the generic packing functions. These take into
+ * account the SJA1105 memory layout quirks and provide some level of
+ * programmer protection against incorrect API use. The errors are not expected
+ * to occur during runtime, therefore printing and swallowing them here is
+ * appropriate instead of cluttering up higher-level code.
+ */
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len)
+{
+ int rc = packing(buf, (u64 *)val, start, end, len,
+ PACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len)
+{
+ int rc = packing((void *)buf, val, start, end, len,
+ UNPACK, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL)
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ else if (rc == -ERANGE)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ dump_stack();
+}
+
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op)
+{
+ int rc = packing(buf, val, start, end, len, op, QUIRK_LSW32_IS_FIRST);
+
+ if (likely(!rc))
+ return;
+
+ if (rc == -EINVAL) {
+ pr_err("Start bit (%d) expected to be larger than end (%d)\n",
+ start, end);
+ } else if (rc == -ERANGE) {
+ if ((start - end + 1) > 64)
+ pr_err("Field %d-%d too large for 64 bits!\n",
+ start, end);
+ else
+ pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
+ *val, start, end);
+ }
+ dump_stack();
+}
+
+/* Little-endian Ethernet CRC32 of data packed as big-endian u32 words */
+u32 sja1105_crc32(const void *buf, size_t len)
+{
+ unsigned int i;
+ u64 word;
+ u32 crc;
+
+ /* seed */
+ crc = ~0;
+ for (i = 0; i < len; i += 4) {
+ sja1105_unpack((void *)buf + i, &word, 31, 0, 4);
+ crc = crc32_le(crc, (u8 *)&word, 4);
+ }
+ return ~crc;
+}
+
+static size_t sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 319, 319, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 318, 318, size, op);
+ sja1105_packing(buf, &entry->switchid, 317, 315, size, op);
+ sja1105_packing(buf, &entry->hostprio, 314, 312, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 311, 264, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 263, 216, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 215, 168, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 167, 120, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 119, 119, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 118, 118, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 117, 117, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 116, 116, size, op);
+ sja1105_packing(buf, &entry->casc_port, 115, 113, size, op);
+ sja1105_packing(buf, &entry->host_port, 112, 110, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 109, 107, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 106, 75, size, op);
+ sja1105_packing(buf, &entry->vlmask, 74, 43, size, op);
+ sja1105_packing(buf, &entry->tpid, 42, 27, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 26, 26, size, op);
+ sja1105_packing(buf, &entry->tpid2, 25, 10, size, op);
+ return size;
+}
+
+static size_t
+sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY;
+ struct sja1105_general_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vllupformat, 351, 351, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 350, 350, size, op);
+ sja1105_packing(buf, &entry->switchid, 349, 347, size, op);
+ sja1105_packing(buf, &entry->hostprio, 346, 344, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 343, 296, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 295, 248, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 247, 200, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 199, 152, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 151, 151, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 150, 150, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 149, 149, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 148, 148, size, op);
+ sja1105_packing(buf, &entry->casc_port, 147, 145, size, op);
+ sja1105_packing(buf, &entry->host_port, 144, 142, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op);
+ sja1105_packing(buf, &entry->vlmask, 106, 75, size, op);
+ sja1105_packing(buf, &entry->tpid, 74, 59, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op);
+ sja1105_packing(buf, &entry->tpid2, 57, 42, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 25, 25, size, op);
+ sja1105_packing(buf, &entry->replay_port, 24, 22, size, op);
+ return size;
+}
+
+static size_t
+sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 13; i < 8; i++, offset += 10)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 9, offset + 0, size, op);
+ return size;
+}
+
+size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->bc_domain, 63, 59, size, op);
+ sja1105_packing(buf, &entry->reach_port, 58, 54, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 53, 49, size, op);
+ for (i = 0, offset = 25; i < 8; i++, offset += 3)
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ return size;
+}
+
+static size_t
+sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 31, 17, size, op);
+ sja1105_packing(buf, &entry->dyn_tbsz, 16, 14, size, op);
+ sja1105_packing(buf, &entry->poly, 13, 6, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 5, 5, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 4, 4, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 3, 3, size, op);
+ return size;
+}
+
+static size_t
+sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->maxage, 57, 43, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 27, 27, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 26, 26, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 25, 25, size, op);
+ return size;
+}
+
+size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->vlanid, 95, 84, size, op);
+ sja1105_packing(buf, &entry->macaddr, 83, 36, size, op);
+ sja1105_packing(buf, &entry->destports, 35, 31, size, op);
+ sja1105_packing(buf, &entry->enfport, 30, 30, size, op);
+ sja1105_packing(buf, &entry->index, 29, 20, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ /* These are static L2 lookup entries, so the structure
+ * should match UM11040 Table 16/17 definitions when
+ * LOCKEDS is 1.
+ */
+ sja1105_packing(buf, &entry->vlanid, 81, 70, size, op);
+ sja1105_packing(buf, &entry->macaddr, 69, 22, size, op);
+ sja1105_packing(buf, &entry->destports, 21, 17, size, op);
+ sja1105_packing(buf, &entry->enfport, 16, 16, size, op);
+ sja1105_packing(buf, &entry->index, 15, 6, size, op);
+ return size;
+}
+
+static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 58, size, op);
+ sja1105_packing(buf, &entry->smax, 57, 42, size, op);
+ sja1105_packing(buf, &entry->rate, 41, 26, size, op);
+ sja1105_packing(buf, &entry->maxlen, 25, 15, size, op);
+ sja1105_packing(buf, &entry->partition, 14, 12, size, op);
+ return size;
+}
+
+static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 72; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 71, 67, size, op);
+ sja1105_packing(buf, &entry->speed, 66, 65, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 48, 33, size, op);
+ sja1105_packing(buf, &entry->maxage, 32, 25, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 24, 22, size, op);
+ sja1105_packing(buf, &entry->vlanid, 21, 10, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 9, 9, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 8, 8, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 7, 7, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 6, 6, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 5, 5, size, op);
+ sja1105_packing(buf, &entry->retag, 4, 4, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 3, 3, size, op);
+ sja1105_packing(buf, &entry->egress, 2, 2, size, op);
+ sja1105_packing(buf, &entry->ingress, 1, 1, size, op);
+ return size;
+}
+
+size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->ifg, 103, 99, size, op);
+ sja1105_packing(buf, &entry->speed, 98, 97, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 96, 81, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 80, 65, size, op);
+ sja1105_packing(buf, &entry->maxage, 64, 57, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 56, 54, size, op);
+ sja1105_packing(buf, &entry->vlanid, 53, 42, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 41, 41, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 35, 35, size, op);
+ sja1105_packing(buf, &entry->retag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->egress, 32, 32, size, op);
+ sja1105_packing(buf, &entry->ingress, 31, 31, size, op);
+ return size;
+}
+
+size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY;
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->ving_mirr, 63, 59, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 58, 54, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 53, 49, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 48, 44, size, op);
+ sja1105_packing(buf, &entry->tag_port, 43, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
+static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 17; i < 5; i++, offset += 3) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ }
+ return size;
+}
+
+size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105_SIZE_TABLE_HEADER;
+ struct sja1105_table_header *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->block_id, 31, 24, size, op);
+ sja1105_packing(buf, &entry->len, 55, 32, size, op);
+ sja1105_packing(buf, &entry->crc, 95, 64, size, op);
+ return size;
+}
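+
+/* Packed layout of the table header, for reference (derived from the field
+ * positions above):
+ *
+ *     bits 31:24     block_id
+ *     bits 55:32     len, in 32-bit words of entry data
+ *     bits 95:64     crc, computed over the first 8 bytes of the header by
+ *                    sja1105_table_header_pack_with_crc() below
+ */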
+
+/* WARNING: the *hdr pointer is really non-const, because this function
+ * modifies the CRC of the header as part of a 2-stage packing operation.
+ */
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr)
+{
+ /* First pack the table as-is, then calculate the CRC, and
+ * finally put the proper CRC into the packed buffer
+ */
+ memset(buf, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(buf, hdr, PACK);
+ hdr->crc = sja1105_crc32(buf, SJA1105_SIZE_TABLE_HEADER - 4);
+ sja1105_pack(buf + SJA1105_SIZE_TABLE_HEADER - 4, &hdr->crc, 31, 0, 4);
+}
+
+static void sja1105_table_write_crc(u8 *table_start, u8 *crc_ptr)
+{
+ u64 computed_crc;
+ int len_bytes;
+
+ len_bytes = (uintptr_t)(crc_ptr - table_start);
+ computed_crc = sja1105_crc32(table_start, len_bytes);
+ sja1105_pack(crc_ptr, &computed_crc, 31, 0, 4);
+}
+
+/* The block IDs that the switches support are unfortunately sparse, so keep a
+ * mapping table to "block indices" and translate back and forth so that we
+ * don't waste memory in struct sja1105_static_config.
+ * Also, since the block id comes from essentially untrusted input (unpacking
+ * the static config from userspace) it has to be sanitized (range-checked)
+ * before blindly indexing kernel memory with the blk_idx.
+ */
+static u64 blk_id_map[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = BLKID_L2_LOOKUP,
+ [BLK_IDX_L2_POLICING] = BLKID_L2_POLICING,
+ [BLK_IDX_VLAN_LOOKUP] = BLKID_VLAN_LOOKUP,
+ [BLK_IDX_L2_FORWARDING] = BLKID_L2_FORWARDING,
+ [BLK_IDX_MAC_CONFIG] = BLKID_MAC_CONFIG,
+ [BLK_IDX_L2_LOOKUP_PARAMS] = BLKID_L2_LOOKUP_PARAMS,
+ [BLK_IDX_L2_FORWARDING_PARAMS] = BLKID_L2_FORWARDING_PARAMS,
+ [BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
+ [BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+};
+
+const char *sja1105_static_config_error_msg[] = {
+ [SJA1105_CONFIG_OK] = "",
+ [SJA1105_MISSING_L2_POLICING_TABLE] =
+ "l2-policing-table needs to have at least one entry",
+ [SJA1105_MISSING_L2_FORWARDING_TABLE] =
+ "l2-forwarding-table is either missing or incomplete",
+ [SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE] =
+ "l2-forwarding-parameters-table is missing",
+ [SJA1105_MISSING_GENERAL_PARAMS_TABLE] =
+ "general-parameters-table is missing",
+ [SJA1105_MISSING_VLAN_TABLE] =
+ "vlan-lookup-table needs to have at least the default untagged VLAN",
+ [SJA1105_MISSING_XMII_TABLE] =
+ "xmii-table is missing",
+ [SJA1105_MISSING_MAC_TABLE] =
+ "mac-configuration-table needs to contain an entry for each port",
+ [SJA1105_OVERCOMMITTED_FRAME_MEMORY] =
+ "Not allowed to overcommit frame memory. L2 memory partitions "
+ "and VL memory partitions share the same space. The sum of all "
+ "16 memory partitions is not allowed to be larger than 929 "
+ "128-byte blocks (or 910 with retagging). Please adjust "
+ "l2-forwarding-parameters-table.part_spc and/or "
+ "vl-forwarding-parameters-table.partspc.",
+};
+
+sja1105_config_valid_t
+static_config_check_memory_size(const struct sja1105_table *tables)
+{
+ const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
+ int i, mem = 0;
+
+ l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
+
+ for (i = 0; i < 8; i++)
+ mem += l2_fwd_params->part_spc[i];
+
+ if (mem > SJA1105_MAX_FRAME_MEMORY)
+ return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
+
+ return SJA1105_CONFIG_OK;
+}
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config)
+{
+ const struct sja1105_table *tables = config->tables;
+#define IS_FULL(blk_idx) \
+ (tables[blk_idx].entry_count == tables[blk_idx].ops->max_entry_count)
+
+ if (tables[BLK_IDX_L2_POLICING].entry_count == 0)
+ return SJA1105_MISSING_L2_POLICING_TABLE;
+
+ if (tables[BLK_IDX_VLAN_LOOKUP].entry_count == 0)
+ return SJA1105_MISSING_VLAN_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING))
+ return SJA1105_MISSING_L2_FORWARDING_TABLE;
+
+ if (!IS_FULL(BLK_IDX_MAC_CONFIG))
+ return SJA1105_MISSING_MAC_TABLE;
+
+ if (!IS_FULL(BLK_IDX_L2_FORWARDING_PARAMS))
+ return SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_GENERAL_PARAMS))
+ return SJA1105_MISSING_GENERAL_PARAMS_TABLE;
+
+ if (!IS_FULL(BLK_IDX_XMII_PARAMS))
+ return SJA1105_MISSING_XMII_TABLE;
+
+ return static_config_check_memory_size(tables);
+#undef IS_FULL
+}
+
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config)
+{
+ struct sja1105_table_header header = {0};
+ enum sja1105_blk_idx i;
+ char *p = buf;
+ int j;
+
+ sja1105_pack(p, &config->device_id, 31, 0, 4);
+ p += SJA1105_SIZE_DEVICE_ID;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+ char *table_start;
+
+ table = &config->tables[i];
+ if (!table->entry_count)
+ continue;
+
+ header.block_id = blk_id_map[i];
+ header.len = table->entry_count *
+ table->ops->packed_entry_size / 4;
+ sja1105_table_header_pack_with_crc(p, &header);
+ p += SJA1105_SIZE_TABLE_HEADER;
+ table_start = p;
+ for (j = 0; j < table->entry_count; j++) {
+ u8 *entry_ptr = table->entries;
+
+ entry_ptr += j * table->ops->unpacked_entry_size;
+ memset(p, 0, table->ops->packed_entry_size);
+ table->ops->packing(p, entry_ptr, PACK);
+ p += table->ops->packed_entry_size;
+ }
+ sja1105_table_write_crc(table_start, p);
+ p += 4;
+ }
+ /* Final header:
+ * Block ID does not matter
+ * Length of 0 marks that header is final
+ * CRC will be replaced on-the-fly on "config upload"
+ */
+ header.block_id = 0;
+ header.len = 0;
+ header.crc = 0xDEADBEEF;
+ memset(p, 0, SJA1105_SIZE_TABLE_HEADER);
+ sja1105_table_header_packing(p, &header, PACK);
+}
+
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config)
+{
+ unsigned int sum;
+ unsigned int header_count;
+ enum sja1105_blk_idx i;
+
+ /* Ending header */
+ header_count = 1;
+ sum = SJA1105_SIZE_DEVICE_ID;
+
+ /* Tables (headers and entries) */
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ const struct sja1105_table *table;
+
+ table = &config->tables[i];
+ if (table->entry_count)
+ header_count++;
+
+ sum += table->ops->packed_entry_size * table->entry_count;
+ }
+ /* Headers have an additional CRC at the end */
+ sum += header_count * (SJA1105_SIZE_TABLE_HEADER + 4);
+ /* Last header does not have an extra CRC because there is no data */
+ sum -= 4;
+
+ return sum;
+}
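+
+/* Worked example of the computation above (illustrative): for a config with
+ * N non-empty tables, the packed length adds up to
+ *
+ *       SJA1105_SIZE_DEVICE_ID
+ *     + per table: SJA1105_SIZE_TABLE_HEADER + entries + 4 (table CRC)
+ *     + SJA1105_SIZE_TABLE_HEADER (final header: no data, no trailing CRC)
+ *
+ * which is what the header_count bookkeeping and the final "sum -= 4" amount
+ * to.
+ */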
+
+/* Compatibility matrices */
+
+/* SJA1105E: First generation, no TTEthernet */
+struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105T: First generation, TTEthernet */
+struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105et_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105et_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105ET_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105et_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105et_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105P: Second generation, no TTEthernet, no SGMII */
+struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105Q: Second generation, TTEthernet, no SGMII */
+struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105R: Second generation, no TTEthernet, SGMII */
+struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+/* SJA1105S: Second generation, TTEthernet, SGMII */
+struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1105pqrs_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1105_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1105_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1105_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1105pqrs_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1105_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1105pqrs_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1105_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1105pqrs_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1105_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1105_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+};
+
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id)
+{
+ enum sja1105_blk_idx i;
+
+ *config = (struct sja1105_static_config) {0};
+
+ /* Transfer static_ops array from priv into per-table ops
+ * for handier access
+ */
+ for (i = 0; i < BLK_IDX_MAX; i++)
+ config->tables[i].ops = &static_ops[i];
+
+ config->device_id = device_id;
+ return 0;
+}
+
+void sja1105_static_config_free(struct sja1105_static_config *config)
+{
+ enum sja1105_blk_idx i;
+
+ for (i = 0; i < BLK_IDX_MAX; i++) {
+ if (config->tables[i].entry_count) {
+ kfree(config->tables[i].entries);
+ config->tables[i].entry_count = 0;
+ }
+ }
+}
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ u8 *entries = table->entries;
+
+ if (i > table->entry_count)
+ return -ERANGE;
+
+ memmove(entries + i * entry_size, entries + (i + 1) * entry_size,
+ (table->entry_count - i) * entry_size);
+
+ table->entry_count--;
+
+ return 0;
+}
+
+/* No pointers to table->entries should be kept when this is called. */
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count)
+{
+ size_t entry_size = table->ops->unpacked_entry_size;
+ void *new_entries, *old_entries = table->entries;
+
+ if (new_count > table->ops->max_entry_count)
+ return -ERANGE;
+
+ new_entries = kcalloc(new_count, entry_size, GFP_KERNEL);
+ if (!new_entries)
+ return -ENOMEM;
+
+ memcpy(new_entries, old_entries, min(new_count, table->entry_count) *
+ entry_size);
+
+ table->entries = new_entries;
+ table->entry_count = new_count;
+ kfree(old_entries);
+ return 0;
+}
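Note for readers of the two helpers above: table->entries is kept as a flat array of ops->unpacked_entry_size records, so callers index it by casting to the per-block entry struct. A minimal caller sketch, not part of this patch (the function name and error handling are illustrative), assuming the declarations from sja1105_static_config.h just below:

#include "sja1105_static_config.h"

static int example_append_vlan(struct sja1105_static_config *config, u16 vid)
{
	struct sja1105_table *table = &config->tables[BLK_IDX_VLAN_LOOKUP];
	struct sja1105_vlan_lookup_entry *vlan;
	int rc;

	/* Grow the unpacked entry array by one slot */
	rc = sja1105_table_resize(table, table->entry_count + 1);
	if (rc < 0)
		return rc;

	/* Entries sit back to back; the new slot is the last one */
	vlan = table->entries;
	vlan[table->entry_count - 1].vlanid = vid;

	return 0;
}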
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
new file mode 100644
index 000000000000..069ca8fd059c
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _SJA1105_STATIC_CONFIG_H
+#define _SJA1105_STATIC_CONFIG_H
+
+#include <linux/packing.h>
+#include <linux/types.h>
+#include <asm/types.h>
+
+#define SJA1105_SIZE_DEVICE_ID 4
+#define SJA1105_SIZE_TABLE_HEADER 12
+#define SJA1105_SIZE_L2_POLICING_ENTRY 8
+#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
+#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
+#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_L2_LOOKUP_ENTRY 12
+#define SJA1105ET_SIZE_MAC_CONFIG_ENTRY 28
+#define SJA1105ET_SIZE_L2_LOOKUP_PARAMS_ENTRY 4
+#define SJA1105ET_SIZE_GENERAL_PARAMS_ENTRY 40
+#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
+#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+
+/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
+enum {
+ BLKID_L2_LOOKUP = 0x05,
+ BLKID_L2_POLICING = 0x06,
+ BLKID_VLAN_LOOKUP = 0x07,
+ BLKID_L2_FORWARDING = 0x08,
+ BLKID_MAC_CONFIG = 0x09,
+ BLKID_L2_LOOKUP_PARAMS = 0x0D,
+ BLKID_L2_FORWARDING_PARAMS = 0x0E,
+ BLKID_GENERAL_PARAMS = 0x11,
+ BLKID_XMII_PARAMS = 0x4E,
+};
+
+enum sja1105_blk_idx {
+ BLK_IDX_L2_LOOKUP = 0,
+ BLK_IDX_L2_POLICING,
+ BLK_IDX_VLAN_LOOKUP,
+ BLK_IDX_L2_FORWARDING,
+ BLK_IDX_MAC_CONFIG,
+ BLK_IDX_L2_LOOKUP_PARAMS,
+ BLK_IDX_L2_FORWARDING_PARAMS,
+ BLK_IDX_GENERAL_PARAMS,
+ BLK_IDX_XMII_PARAMS,
+ BLK_IDX_MAX,
+ /* Fake block indices that are only valid for dynamic access */
+ BLK_IDX_MGMT_ROUTE,
+ BLK_IDX_MAX_DYN,
+ BLK_IDX_INVAL = -1,
+};
+
+#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
+#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
+#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT 1
+#define SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT 1
+#define SJA1105_MAX_GENERAL_PARAMS_COUNT 1
+#define SJA1105_MAX_XMII_PARAMS_COUNT 1
+
+#define SJA1105_MAX_FRAME_MEMORY 929
+
+#define SJA1105E_DEVICE_ID 0x9C00000Cull
+#define SJA1105T_DEVICE_ID 0x9E00030Eull
+#define SJA1105PR_DEVICE_ID 0xAF00030Eull
+#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+
+#define SJA1105ET_PART_NO 0x9A83
+#define SJA1105P_PART_NO 0x9A84
+#define SJA1105Q_PART_NO 0x9A85
+#define SJA1105R_PART_NO 0x9A86
+#define SJA1105S_PART_NO 0x9A87
+
+struct sja1105_general_params_entry {
+ u64 vllupformat;
+ u64 mirr_ptacu;
+ u64 switchid;
+ u64 hostprio;
+ u64 mac_fltres1;
+ u64 mac_fltres0;
+ u64 mac_flt1;
+ u64 mac_flt0;
+ u64 incl_srcpt1;
+ u64 incl_srcpt0;
+ u64 send_meta1;
+ u64 send_meta0;
+ u64 casc_port;
+ u64 host_port;
+ u64 mirr_port;
+ u64 vlmarker;
+ u64 vlmask;
+ u64 tpid;
+ u64 ignore2stf;
+ u64 tpid2;
+ /* P/Q/R/S only */
+ u64 queue_ts;
+ u64 egrmirrvid;
+ u64 egrmirrpcp;
+ u64 egrmirrdei;
+ u64 replay_port;
+};
+
+struct sja1105_vlan_lookup_entry {
+ u64 ving_mirr;
+ u64 vegr_mirr;
+ u64 vmemb_port;
+ u64 vlan_bc;
+ u64 tag_port;
+ u64 vlanid;
+};
+
+struct sja1105_l2_lookup_entry {
+ u64 vlanid;
+ u64 macaddr;
+ u64 destports;
+ u64 enfport;
+ u64 index;
+};
+
+struct sja1105_l2_lookup_params_entry {
+ u64 maxage; /* Shared */
+ u64 dyn_tbsz; /* E/T only */
+ u64 poly; /* E/T only */
+ u64 shared_learn; /* Shared */
+ u64 no_enf_hostprt; /* Shared */
+ u64 no_mgmt_learn; /* Shared */
+};
+
+struct sja1105_l2_forwarding_entry {
+ u64 bc_domain;
+ u64 reach_port;
+ u64 fl_domain;
+ u64 vlan_pmap[8];
+};
+
+struct sja1105_l2_forwarding_params_entry {
+ u64 max_dynp;
+ u64 part_spc[8];
+};
+
+struct sja1105_l2_policing_entry {
+ u64 sharindx;
+ u64 smax;
+ u64 rate;
+ u64 maxlen;
+ u64 partition;
+};
+
+struct sja1105_mac_config_entry {
+ u64 top[8];
+ u64 base[8];
+ u64 enabled[8];
+ u64 ifg;
+ u64 speed;
+ u64 tp_delin;
+ u64 tp_delout;
+ u64 maxage;
+ u64 vlanprio;
+ u64 vlanid;
+ u64 ing_mirr;
+ u64 egr_mirr;
+ u64 drpnona664;
+ u64 drpdtag;
+ u64 drpuntag;
+ u64 retag;
+ u64 dyn_learn;
+ u64 egress;
+ u64 ingress;
+};
+
+struct sja1105_xmii_params_entry {
+ u64 phy_mac[5];
+ u64 xmii_mode[5];
+};
+
+struct sja1105_table_header {
+ u64 block_id;
+ u64 len;
+ u64 crc;
+};
+
+struct sja1105_table_ops {
+ size_t (*packing)(void *buf, void *entry_ptr, enum packing_op op);
+ size_t unpacked_entry_size;
+ size_t packed_entry_size;
+ size_t max_entry_count;
+};
+
+struct sja1105_table {
+ const struct sja1105_table_ops *ops;
+ size_t entry_count;
+ void *entries;
+};
+
+struct sja1105_static_config {
+ u64 device_id;
+ struct sja1105_table tables[BLK_IDX_MAX];
+};
+
+extern struct sja1105_table_ops sja1105e_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105t_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
+extern struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+
+size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
+void
+sja1105_table_header_pack_with_crc(void *buf, struct sja1105_table_header *hdr);
+size_t
+sja1105_static_config_get_length(const struct sja1105_static_config *config);
+
+typedef enum {
+ SJA1105_CONFIG_OK = 0,
+ SJA1105_MISSING_L2_POLICING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_TABLE,
+ SJA1105_MISSING_L2_FORWARDING_PARAMS_TABLE,
+ SJA1105_MISSING_GENERAL_PARAMS_TABLE,
+ SJA1105_MISSING_VLAN_TABLE,
+ SJA1105_MISSING_XMII_TABLE,
+ SJA1105_MISSING_MAC_TABLE,
+ SJA1105_OVERCOMMITTED_FRAME_MEMORY,
+} sja1105_config_valid_t;
+
+extern const char *sja1105_static_config_error_msg[];
+
+sja1105_config_valid_t
+sja1105_static_config_check_valid(const struct sja1105_static_config *config);
+void
+sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
+int sja1105_static_config_init(struct sja1105_static_config *config,
+ const struct sja1105_table_ops *static_ops,
+ u64 device_id);
+void sja1105_static_config_free(struct sja1105_static_config *config);
+
+int sja1105_table_delete_entry(struct sja1105_table *table, int i);
+int sja1105_table_resize(struct sja1105_table *table, size_t new_count);
+
+u32 sja1105_crc32(const void *buf, size_t len);
+
+void sja1105_pack(void *buf, const u64 *val, int start, int end, size_t len);
+void sja1105_unpack(const void *buf, u64 *val, int start, int end, size_t len);
+void sja1105_packing(void *buf, u64 *val, int start, int end,
+ size_t len, enum packing_op op);
+
+#endif
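Taken together, the API above suggests the intended lifecycle of a static config: bind one of the per-revision ops arrays, populate the tables, validate, then pack into a wire-format buffer and free the unpacked form. A hedged sketch of that flow (the wrapper function and its error codes are illustrative, not part of the patch):

static int example_build_config(void *buf, size_t buf_len)
{
	struct sja1105_static_config config;
	int rc;

	rc = sja1105_static_config_init(&config, sja1105e_table_ops,
					SJA1105E_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* ... resize and fill config.tables[] here ... */

	if (sja1105_static_config_check_valid(&config) != SJA1105_CONFIG_OK) {
		rc = -EINVAL;
		goto out;
	}

	if (sja1105_static_config_get_length(&config) > buf_len) {
		rc = -ENOSPC;
		goto out;
	}

	sja1105_static_config_pack(buf, &config);
out:
	sja1105_static_config_free(&config);
	return rc;
}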
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 0d15a12a4560..3568129fb7da 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/rtnetlink.h>
@@ -131,21 +132,9 @@ static void dummy_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int dummy_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *ts_info)
-{
- ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->phc_index = -1;
-
- return 0;
-};
-
static const struct ethtool_ops dummy_ethtool_ops = {
.get_drvinfo = dummy_get_drvinfo,
- .get_ts_info = dummy_get_ts_info,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static void dummy_setup(struct net_device *dev)
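The dummy driver now relies on the generic ethtool helper instead of a private get_ts_info. For reference, ethtool_op_get_ts_info should report the same software-only timestamping capabilities that the deleted function did, roughly along these lines (sketched from the removed code; the in-tree helper is expected to be equivalent):

int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;	/* no PTP hardware clock */
	return 0;
}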
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 3155f7fa83eb..90080a886cd9 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1458,7 +1458,7 @@ static int greth_of_probe(struct platform_device *ofdev)
const u8 *addr;
addr = of_get_mac_address(ofdev->dev.of_node);
- if (addr) {
+ if (!IS_ERR(addr)) {
for (i = 0; i < 6; i++)
macaddr[i] = (unsigned int) addr[i];
} else {
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index e1acafa82214..37ebd890ef51 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -870,7 +870,7 @@ static int emac_probe(struct platform_device *pdev)
/* Read MAC-address from DT */
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
/* Check if the MAC address is valid, if not get a random one */
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index aa1d1f5339d2..877e67f4344b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1537,7 +1537,7 @@ static int altera_tse_probe(struct platform_device *pdev)
/* get default MAC address from device tree */
macaddr = of_get_mac_address(pdev->dev.of_node);
- if (macaddr)
+ if (!IS_ERR(macaddr))
ether_addr_copy(ndev->dev_addr, macaddr);
else
eth_hw_addr_random(ndev);
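The of_get_mac_address() conversions above (greth, sun4i-emac, altera_tse) all follow the same pattern: the helper now signals failure through an ERR_PTR-encoded pointer rather than NULL (presumably so errors such as probe deferral can be propagated), so callers must test with IS_ERR() before copying. A condensed sketch of the resulting caller idiom (the wrapper name is illustrative):

static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	const u8 *mac_addr = of_get_mac_address(np);

	if (!IS_ERR(mac_addr))
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);	/* fall back to a random MAC */
}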
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 05798aa5bb73..7f8266b191ae 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -731,7 +731,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
if (rc)
pr_err("Cannot set LLQ configuration: %d\n", rc);
- return 0;
+ return rc;
}
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
@@ -2194,7 +2194,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (unlikely(ret))
return ret;
- if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
+ if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
pr_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func);
return -EOPNOTSUPP;
@@ -2279,6 +2279,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
return -EINVAL;
}
+ rss->hash_func = func;
rc = ena_com_set_hash_function(ena_dev);
/* Restore the old function */
@@ -2801,7 +2802,11 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
/* if moderation is supported by device we set adaptive moderation */
delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
- ena_com_enable_adaptive_moderation(ena_dev);
+
+ /* Disable adaptive moderation by default - can be enabled from
+ * ethtool
+ */
+ ena_com_disable_adaptive_moderation(ena_dev);
return 0;
err:
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index f3a5a384e6e8..fe596bc30a96 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -697,8 +697,8 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
if (indir) {
for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
rc = ena_com_indirect_table_fill_entry(ena_dev,
- ENA_IO_RXQ_IDX(indir[i]),
- i);
+ i,
+ ENA_IO_RXQ_IDX(indir[i]));
if (unlikely(rc)) {
netif_err(adapter, drv, netdev,
"Cannot fill indirect table (index is too large)\n");
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a6eacf2099c3..9c83642922c7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -224,28 +224,23 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
if (!tx_ring->tx_buffer_info) {
tx_ring->tx_buffer_info = vzalloc(size);
if (!tx_ring->tx_buffer_info)
- return -ENOMEM;
+ goto err_tx_buffer_info;
}
size = sizeof(u16) * tx_ring->ring_size;
tx_ring->free_tx_ids = vzalloc_node(size, node);
if (!tx_ring->free_tx_ids) {
tx_ring->free_tx_ids = vzalloc(size);
- if (!tx_ring->free_tx_ids) {
- vfree(tx_ring->tx_buffer_info);
- return -ENOMEM;
- }
+ if (!tx_ring->free_tx_ids)
+ goto err_free_tx_ids;
}
size = tx_ring->tx_max_header_size;
tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
if (!tx_ring->push_buf_intermediate_buf) {
tx_ring->push_buf_intermediate_buf = vzalloc(size);
- if (!tx_ring->push_buf_intermediate_buf) {
- vfree(tx_ring->tx_buffer_info);
- vfree(tx_ring->free_tx_ids);
- return -ENOMEM;
- }
+ if (!tx_ring->push_buf_intermediate_buf)
+ goto err_push_buf_intermediate_buf;
}
/* Req id ring for TX out of order completions */
@@ -259,6 +254,15 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
tx_ring->next_to_clean = 0;
tx_ring->cpu = ena_irq->cpu;
return 0;
+
+err_push_buf_intermediate_buf:
+ vfree(tx_ring->free_tx_ids);
+ tx_ring->free_tx_ids = NULL;
+err_free_tx_ids:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+err_tx_buffer_info:
+ return -ENOMEM;
}
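The rework above replaces duplicated inline vfree() calls with a single unwind ladder and clears the pointers so later teardown paths cannot double-free. A minimal sketch of the shape, with illustrative names (not taken from the driver):

#include <linux/errno.h>
#include <linux/vmalloc.h>

static int example_alloc_pair(void **a, void **b, size_t size)
{
	*a = vzalloc(size);
	if (!*a)
		goto err_a;

	*b = vzalloc(size);
	if (!*b)
		goto err_b;

	return 0;

err_b:
	vfree(*a);
	*a = NULL;	/* keep later frees idempotent */
err_a:
	return -ENOMEM;
}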
/* ena_free_tx_resources - Free I/O Tx Resources per Queue
@@ -378,6 +382,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
rx_ring->free_rx_ids = vzalloc(size);
if (!rx_ring->free_rx_ids) {
vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
return -ENOMEM;
}
}
@@ -1820,6 +1825,7 @@ err_setup_rx:
err_setup_tx:
ena_free_io_irq(adapter);
err_req_irq:
+ ena_del_napi(adapter);
return rc;
}
@@ -2236,7 +2242,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
/* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb
*/
@@ -2258,8 +2264,7 @@ error_drop_packet:
}
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
u16 qid;
/* we suspect that this is good for in--kernel network services that
@@ -2269,7 +2274,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb, NULL);
+ qid = netdev_pick_tx(dev, skb, NULL);
return qid;
}
@@ -2292,7 +2297,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev,
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strncpy(host_info->kernel_ver_str, utsname()->version,
+ strlcpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4666084eda16..d5fd49dd25f3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
smp_wmb();
ring->cur = cur_index + 1;
- if (!packet->skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xgbe_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 0cc911f928b1..3dd0cecddba8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1612,7 +1612,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, any kind of event packet */
+ /* Fall through - to PTP v1, UDP, any kind of event packet */
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
@@ -1623,7 +1623,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Sync packet */
+ /* Fall through - to PTP v1, UDP, Sync packet */
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
@@ -1634,7 +1634,7 @@ static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
/* PTP v2, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Delay_req packet */
+ /* Fall through - to PTP v1, UDP, Delay_req packet */
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
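The comment rewording in the xgbe hunks above is presumably meant to mark the intentional case fall-throughs in a form that GCC's -Wimplicit-fallthrough comment heuristics and static checkers recognize. A self-contained sketch of the annotated pattern (the enum and flags are illustrative):

#include <linux/types.h>

enum example_filter { EXAMPLE_PTP_V1, EXAMPLE_PTP_V2 };

static void example_enable(enum example_filter f, bool *v1, bool *v2)
{
	switch (f) {
	case EXAMPLE_PTP_V2:
		*v2 = true;
		/* Fall through - v2 filtering also needs the v1 setup */
	case EXAMPLE_PTP_V1:
		*v1 = true;
		break;
	}
}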
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 7d623e90dc19..12472c5bb34d 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -17,7 +17,8 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
- depends on PCI && X86_64
+ depends on PCI
+ depends on X86_64 || ARM64 || COMPILE_TEST
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 4556630ee286..1f99cf832476 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_drvinfo.o \
aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 3944ce7f0870..8f35c3f883f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -16,7 +16,7 @@
#define AQ_CFG_TCS_DEF 1U
#define AQ_CFG_TXDS_DEF 4096U
-#define AQ_CFG_RXDS_DEF 1024U
+#define AQ_CFG_RXDS_DEF 2048U
#define AQ_CFG_IS_POLLING_DEF 0U
@@ -34,10 +34,16 @@
#define AQ_CFG_TCS_MAX 8U
#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
-#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (2U * 1024U)
#define AQ_CFG_TX_CLEAN_BUDGET 256U
+#define AQ_CFG_RX_REFILL_THRES 32U
+
+#define AQ_CFG_RX_HDR_SIZE 256U
+
+#define AQ_CFG_RX_PAGEORDER 0U
+
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index 6b6d1724676e..235bb3a72d66 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -41,9 +41,6 @@
#define AQ_DEVICE_ID_AQC111S 0x91B1
#define AQ_DEVICE_ID_AQC112S 0x92B1
-#define AQ_DEVICE_ID_AQC111E 0x51B1
-#define AQ_DEVICE_ID_AQC112E 0x52B1
-
#define HW_ATL_NIC_NAME "aQuantia AQtion 10Gbit Network Adapter"
#define AQ_HWREV_ANY 0
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
new file mode 100644
index 000000000000..f5a92b2a5cd6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2019 aQuantia Corporation. */
+
+/* File aq_drvinfo.c: Definition of common code for firmware info in sysfs. */
+
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/hwmon.h>
+#include <linux/uaccess.h>
+
+#include "aq_drvinfo.h"
+
+static int aq_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *value)
+{
+ struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+ int temp;
+ int err;
+
+ if (!aq_nic)
+ return -EIO;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ if (!aq_nic->aq_fw_ops->get_phy_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = aq_nic->aq_fw_ops->get_phy_temp(aq_nic->aq_hw, &temp);
+ *value = temp;
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aq_hwmon_read_string(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct aq_nic_s *aq_nic = dev_get_drvdata(dev);
+
+ if (!aq_nic)
+ return -EIO;
+
+ if (type != hwmon_temp)
+ return -EOPNOTSUPP;
+
+ if (!aq_nic->aq_fw_ops->get_phy_temp)
+ return -EOPNOTSUPP;
+
+ switch (attr) {
+ case hwmon_temp_label:
+ *str = "PHY Temperature";
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t aq_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static const struct hwmon_ops aq_hwmon_ops = {
+ .is_visible = aq_hwmon_is_visible,
+ .read = aq_hwmon_read,
+ .read_string = aq_hwmon_read_string,
+};
+
+static u32 aq_hwmon_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ 0,
+};
+
+static const struct hwmon_channel_info aq_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = aq_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *aq_hwmon_info[] = {
+ &aq_hwmon_temp,
+ NULL,
+};
+
+static const struct hwmon_chip_info aq_hwmon_chip_info = {
+ .ops = &aq_hwmon_ops,
+ .info = aq_hwmon_info,
+};
+
+int aq_drvinfo_init(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct device *dev = &aq_nic->pdev->dev;
+ struct device *hwmon_dev;
+ int err = 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev,
+ ndev->name,
+ aq_nic,
+ &aq_hwmon_chip_info,
+ NULL);
+
+ if (IS_ERR(hwmon_dev))
+ err = PTR_ERR(hwmon_dev);
+
+ return err;
+}
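aq_drvinfo.c registers a single temperature channel through the hwmon *_with_info API: the zero-terminated config array declares one sensor exposing an input value and a label, and is_visible makes both attributes read-only (0444). Assuming the standard hwmon ABI, this shows up as temp1_input and temp1_label under the device's hwmon node. Purely as a hypothetical illustration of the array layout (not in this patch), a second sensor would be one more element before the terminator:

static u32 example_hwmon_temp_config[] = {
	HWMON_T_INPUT | HWMON_T_LABEL,	/* temp1: PHY temperature */
	HWMON_T_INPUT,			/* temp2: hypothetical second sensor */
	0,				/* config arrays are zero-terminated */
};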
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
new file mode 100644
index 000000000000..41fbb1358068
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_drvinfo.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_drvinfo.h: Declaration of common code for firmware info in sysfs. */
+
+#ifndef AQ_DRVINFO_H
+#define AQ_DRVINFO_H
+
+#include "aq_nic.h"
+#include "aq_hw.h"
+#include "hw_atl/hw_atl_utils.h"
+
+int aq_drvinfo_init(struct net_device *ndev);
+
+#endif /* AQ_DRVINFO_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a718d7a1f76c..79da48094770 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -405,8 +405,10 @@ static int aq_ethtool_get_eee(struct net_device *ndev, struct ethtool_eee *eee)
if (!aq_nic->aq_fw_ops->get_eee_rate)
return -EOPNOTSUPP;
+ mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
&supported_rates);
+ mutex_unlock(&aq_nic->fwreq_mutex);
if (err < 0)
return err;
@@ -439,8 +441,10 @@ static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
!aq_nic->aq_fw_ops->set_eee_rate))
return -EOPNOTSUPP;
+ mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->get_eee_rate(aq_nic->aq_hw, &rate,
&supported_rates);
+ mutex_unlock(&aq_nic->fwreq_mutex);
if (err < 0)
return err;
@@ -452,20 +456,28 @@ static int aq_ethtool_set_eee(struct net_device *ndev, struct ethtool_eee *eee)
cfg->eee_speeds = 0;
}
- return aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->set_eee_rate(aq_nic->aq_hw, rate);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+
+ return err;
}
static int aq_ethtool_nway_reset(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
if (unlikely(!aq_nic->aq_fw_ops->renegotiate))
return -EOPNOTSUPP;
- if (netif_running(ndev))
- return aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+ if (netif_running(ndev)) {
+ mutex_lock(&aq_nic->fwreq_mutex);
+ err = aq_nic->aq_fw_ops->renegotiate(aq_nic->aq_hw);
+ mutex_unlock(&aq_nic->fwreq_mutex);
+ }
- return 0;
+ return err;
}
static void aq_ethtool_get_pauseparam(struct net_device *ndev,
@@ -503,7 +515,9 @@ static int aq_ethtool_set_pauseparam(struct net_device *ndev,
else
aq_nic->aq_hw->aq_nic_cfg->flow_control &= ~AQ_NIC_FC_TX;
+ mutex_lock(&aq_nic->fwreq_mutex);
err = aq_nic->aq_fw_ops->set_flow_control(aq_nic->aq_hw);
+ mutex_unlock(&aq_nic->fwreq_mutex);
return err;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 81aab73dc22f..95fd6c852a9d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -88,6 +88,8 @@ struct aq_stats_s {
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
+#define AQ_HW_SERVICE_IRQS 1U
+
#define AQ_HW_POWER_STATE_D0 0U
#define AQ_HW_POWER_STATE_D3 3U
@@ -259,6 +261,8 @@ struct aq_fw_ops {
int (*update_stats)(struct aq_hw_s *self);
+ int (*get_phy_temp)(struct aq_hw_s *self, int *temp);
+
u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
int (*set_flow_control)(struct aq_hw_s *self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
index d526c4f19d34..22a1c784dc9c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -53,6 +53,18 @@ void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
writel(value, hw->mmio + reg);
}
+/* Most of the 64-bit registers are in LSW, MSW form.
+ * Counters are normally implemented by HW as latched pairs:
+ * reading the LSW first locks the MSW, to overcome LSW overflow.
+ */
+u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg)
+{
+ u64 value = aq_hw_read_reg(hw, reg);
+
+ value |= (u64)aq_hw_read_reg(hw, reg + 4) << 32;
+ return value;
+}
+
int aq_hw_err_from_flags(struct aq_hw_s *hw)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
index bc711238ca0c..bf73428ed689 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -35,6 +35,7 @@ void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+u64 aq_hw_read_reg64(struct aq_hw_s *hw, u32 reg);
int aq_hw_err_from_flags(struct aq_hw_s *hw);
#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 2a11c1eefd8f..1ea8b77fc1a7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -23,8 +23,17 @@ MODULE_VERSION(AQ_CFG_DRV_VERSION);
MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+static const char aq_ndev_driver_name[] = AQ_CFG_DRV_NAME;
+
static const struct net_device_ops aq_ndev_ops;
+static struct workqueue_struct *aq_ndev_wq;
+
+void aq_ndev_schedule_work(struct work_struct *work)
+{
+ queue_work(aq_ndev_wq, work);
+}
+
struct net_device *aq_ndev_alloc(void)
{
struct net_device *ndev = NULL;
@@ -209,3 +218,35 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
+
+static int __init aq_ndev_init_module(void)
+{
+ int ret;
+
+ aq_ndev_wq = create_singlethread_workqueue(aq_ndev_driver_name);
+ if (!aq_ndev_wq) {
+ pr_err("Failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = aq_pci_func_register_driver();
+ if (ret) {
+ destroy_workqueue(aq_ndev_wq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit aq_ndev_exit_module(void)
+{
+ aq_pci_func_unregister_driver();
+
+ if (aq_ndev_wq) {
+ destroy_workqueue(aq_ndev_wq);
+ aq_ndev_wq = NULL;
+ }
+}
+
+module_init(aq_ndev_init_module);
+module_exit(aq_ndev_exit_module);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
index ce92152eb43e..5448b82fb7ea 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -13,7 +13,9 @@
#define AQ_MAIN_H
#include "aq_common.h"
+#include "aq_nic.h"
+void aq_ndev_schedule_work(struct work_struct *work);
struct net_device *aq_ndev_alloc(void);
#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ff83667410bd..e82d25a91bc1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -14,6 +14,7 @@
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
+#include "aq_main.h"
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
@@ -73,6 +74,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->tx_itr = aq_itr_tx;
cfg->rx_itr = aq_itr_rx;
+ cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
@@ -91,7 +93,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
/*rss rings */
cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
cfg->vecs = min(cfg->vecs, num_online_cpus());
- cfg->vecs = min(cfg->vecs, self->irqvecs);
+ if (self->irqvecs > AQ_HW_SERVICE_IRQS)
+ cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
/* cfg->vecs should be power of 2 for RSS */
if (cfg->vecs >= 8U)
cfg->vecs = 8U;
@@ -115,6 +118,15 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->vecs = 1U;
}
+ /* Check if we have enough vectors allocated for
+ * the link status IRQ. If not, link state will be
+ * picked up by the slower service task instead.
+ */
+ if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
+ cfg->link_irq_vec = cfg->vecs;
+ else
+ cfg->link_irq_vec = 0;
+
cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
cfg->features = cfg->aq_hw_caps->hw_features;
}
@@ -160,30 +172,48 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
return 0;
}
-static void aq_nic_service_timer_cb(struct timer_list *t)
+static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
- struct aq_nic_s *self = from_timer(self, t, service_timer);
- int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL;
- int err = 0;
+ struct aq_nic_s *self = private;
+
+ if (!self)
+ return IRQ_NONE;
+
+ aq_nic_update_link_status(self);
+
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ BIT(self->aq_nic_cfg.link_irq_vec));
+ return IRQ_HANDLED;
+}
+
+static void aq_nic_service_task(struct work_struct *work)
+{
+ struct aq_nic_s *self = container_of(work, struct aq_nic_s,
+ service_task);
+ int err;
if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
- goto err_exit;
+ return;
err = aq_nic_update_link_status(self);
if (err)
- goto err_exit;
+ return;
+ mutex_lock(&self->fwreq_mutex);
if (self->aq_fw_ops->update_stats)
self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
aq_nic_update_ndev_stats(self);
+}
+
+static void aq_nic_service_timer_cb(struct timer_list *t)
+{
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
- /* If no link - use faster timer rate to detect link up asap */
- if (!netif_carrier_ok(self->ndev))
- ctimer = max(ctimer / 2, 1);
+ mod_timer(&self->service_timer, jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
-err_exit:
- mod_timer(&self->service_timer, jiffies + ctimer);
+ aq_ndev_schedule_work(&self->service_task);
}
static void aq_nic_polling_timer_cb(struct timer_list *t)
@@ -213,8 +243,10 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
if (err)
goto err_exit;
+ mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
@@ -283,7 +315,9 @@ int aq_nic_init(struct aq_nic_s *self)
unsigned int i = 0U;
self->power_state = AQ_HW_POWER_STATE_D0;
+ mutex_lock(&self->fwreq_mutex);
err = self->aq_hw_ops->hw_reset(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
if (err < 0)
goto err_exit;
@@ -333,9 +367,11 @@ int aq_nic_start(struct aq_nic_s *self)
err = aq_nic_update_interrupt_moderation_settings(self);
if (err)
goto err_exit;
+
+ INIT_WORK(&self->service_task, aq_nic_service_task);
+
timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
- mod_timer(&self->service_timer, jiffies +
- AQ_CFG_SERVICE_TIMER_INTERVAL);
+ aq_nic_service_timer_cb(&self->service_timer);
if (self->aq_nic_cfg.is_polling) {
timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
@@ -344,13 +380,25 @@ int aq_nic_start(struct aq_nic_s *self)
} else {
for (i = 0U, aq_vec = self->aq_vec[0];
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
- err = aq_pci_func_alloc_irq(self, i,
- self->ndev->name, aq_vec,
+ err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
+ aq_vec_isr, aq_vec,
aq_vec_get_affinity_mask(aq_vec));
if (err < 0)
goto err_exit;
}
+ if (self->aq_nic_cfg.link_irq_vec) {
+ int irqvec = pci_irq_vector(self->pdev,
+ self->aq_nic_cfg.link_irq_vec);
+ err = request_threaded_irq(irqvec, NULL,
+ aq_linkstate_threaded_isr,
+ IRQF_SHARED,
+ self->ndev->name, self);
+ if (err < 0)
+ goto err_exit;
+ self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
+ }
+
err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
AQ_CFG_IRQ_MASK);
if (err < 0)
@@ -652,7 +700,14 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
unsigned int i = 0U;
unsigned int count = 0U;
struct aq_vec_s *aq_vec = NULL;
- struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
+ struct aq_stats_s *stats;
+
+ if (self->aq_fw_ops->update_stats) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->update_stats(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+ stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
if (!stats)
goto err_exit;
@@ -698,11 +753,12 @@ static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
struct net_device *ndev = self->ndev;
struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
- ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc;
- ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc;
+ ndev->stats.rx_packets = stats->dma_pkt_rc;
+ ndev->stats.rx_bytes = stats->dma_oct_rc;
ndev->stats.rx_errors = stats->erpr;
- ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc;
- ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc;
+ ndev->stats.rx_dropped = stats->dpc;
+ ndev->stats.tx_packets = stats->dma_pkt_tc;
+ ndev->stats.tx_bytes = stats->dma_oct_tc;
ndev->stats.tx_errors = stats->erpt;
ndev->stats.multicast = stats->mprc;
}
@@ -839,7 +895,9 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
self->aq_nic_cfg.is_autoneg = false;
}
+ mutex_lock(&self->fwreq_mutex);
err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
+ mutex_unlock(&self->fwreq_mutex);
if (err < 0)
goto err_exit;
@@ -872,6 +930,7 @@ int aq_nic_stop(struct aq_nic_s *self)
netif_carrier_off(self->ndev);
del_timer_sync(&self->service_timer);
+ cancel_work_sync(&self->service_task);
self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
@@ -899,14 +958,22 @@ void aq_nic_deinit(struct aq_nic_s *self)
self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
aq_vec_deinit(aq_vec);
- self->aq_fw_ops->deinit(self->aq_hw);
+ if (likely(self->aq_fw_ops->deinit)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->deinit(self->aq_hw);
+ mutex_unlock(&self->fwreq_mutex);
+ }
if (self->power_state != AQ_HW_POWER_STATE_D0 ||
- self->aq_hw->aq_nic_cfg->wol) {
- self->aq_fw_ops->set_power(self->aq_hw,
- self->power_state,
- self->ndev->dev_addr);
- }
+ self->aq_hw->aq_nic_cfg->wol)
+ if (likely(self->aq_fw_ops->set_power)) {
+ mutex_lock(&self->fwreq_mutex);
+ self->aq_fw_ops->set_power(self->aq_hw,
+ self->power_state,
+ self->ndev->dev_addr);
+ mutex_unlock(&self->fwreq_mutex);
+ }
+
err_exit:;
}
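The aq_nic.c changes split the periodic service logic in two: the timer callback stays in atomic context and only re-arms itself and queues a work item, while the work item (like the other firmware entry points touched above) may sleep and serializes firmware access with fwreq_mutex. A minimal sketch of that timer-to-workqueue deferral, with illustrative names:

#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_ctx {
	struct timer_list timer;
	struct work_struct work;
	unsigned long interval;		/* in jiffies */
};

static void example_work_fn(struct work_struct *work)
{
	/* Process context: may sleep, take mutexes, talk to firmware */
}

static void example_timer_cb(struct timer_list *t)
{
	struct example_ctx *ctx = from_timer(ctx, t, timer);

	/* Atomic context: just re-arm and defer the real work */
	mod_timer(&ctx->timer, jiffies + ctx->interval);
	schedule_work(&ctx->work);
}

The timer_setup()/INIT_WORK() calls that bind these together are done once at start time, as aq_nic_start() does above.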
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 8e34c1e49bf2..c03d38ed105d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -26,11 +26,13 @@ struct aq_nic_cfg_s {
u64 features;
u32 rxds; /* rx ring size, descriptors # */
u32 txds; /* tx ring size, descriptors # */
- u32 vecs; /* vecs==allocated irqs */
+ u32 vecs; /* allocated rx/tx vectors */
+ u32 link_irq_vec;
u32 irq_type;
u32 itr;
u16 rx_itr;
u16 tx_itr;
+ u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
u32 flow_control;
@@ -91,6 +93,7 @@ struct aq_nic_s {
const struct aq_fw_ops *aq_fw_ops;
struct aq_nic_cfg_s aq_nic_cfg;
struct timer_list service_timer;
+ struct work_struct service_task;
struct timer_list polling_timer;
struct aq_hw_link_status_s link_status;
struct {
@@ -103,6 +106,8 @@ struct aq_nic_s {
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ /* mutex to serialize FW interface access operations */
+ struct mutex fwreq_mutex;
struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 0217ff4669a4..9cb0864d6d8d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -20,6 +20,7 @@
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
+#include "aq_drvinfo.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -42,9 +43,6 @@ static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },
- { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111E), },
- { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112E), },
-
{}
};
@@ -74,9 +72,6 @@ static const struct aq_board_revision_s hw_atl_boards[] = {
{ AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
{ AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
{ AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
-
- { AQ_DEVICE_ID_AQC111E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111e, },
- { AQ_DEVICE_ID_AQC112E, AQ_HWREV_ANY, &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112e, },
};
MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
@@ -139,26 +134,27 @@ err_exit:
}
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
- char *name, void *aq_vec, cpumask_t *affinity_mask)
+ char *name, irq_handler_t irq_handler,
+ void *irq_arg, cpumask_t *affinity_mask)
{
struct pci_dev *pdev = self->pdev;
int err;
if (pdev->msix_enabled || pdev->msi_enabled)
- err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr, 0,
- name, aq_vec);
+ err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
+ name, irq_arg);
else
err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
- IRQF_SHARED, name, aq_vec);
+ IRQF_SHARED, name, irq_arg);
if (err >= 0) {
self->msix_entry_mask |= (1 << i);
- self->aq_vec[i] = aq_vec;
- if (pdev->msix_enabled)
+ if (pdev->msix_enabled && affinity_mask)
irq_set_affinity_hint(pci_irq_vector(pdev, i),
affinity_mask);
}
+
return err;
}
@@ -166,16 +162,22 @@ void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
struct pci_dev *pdev = self->pdev;
unsigned int i;
+ void *irq_data;
for (i = 32U; i--;) {
if (!((1U << i) & self->msix_entry_mask))
continue;
- if (i >= AQ_CFG_VECS_MAX)
+ if (self->aq_nic_cfg.link_irq_vec &&
+ i == self->aq_nic_cfg.link_irq_vec)
+ irq_data = self;
+ else if (i < AQ_CFG_VECS_MAX)
+ irq_data = self->aq_vec[i];
+ else
continue;
if (pdev->msix_enabled)
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
- free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
+ free_irq(pci_irq_vector(pdev, i), irq_data);
self->msix_entry_mask &= ~(1U << i);
}
}
@@ -185,7 +187,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
if (self->pdev->msix_enabled)
return AQ_HW_IRQ_MSIX;
if (self->pdev->msi_enabled)
- return AQ_HW_IRQ_MSIX;
+ return AQ_HW_IRQ_MSI;
return AQ_HW_IRQ_LEGACY;
}
@@ -223,6 +225,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(ndev, &pdev->dev);
pci_set_drvdata(pdev, self);
+ mutex_init(&self->fwreq_mutex);
+
err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
&aq_nic_get_cfg(self)->aq_hw_caps);
if (err)
@@ -268,6 +272,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs = min((u8)AQ_CFG_VECS_DEF,
aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
numvecs = min(numvecs, num_online_cpus());
+ numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
@@ -289,6 +294,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (err < 0)
goto err_register;
+ aq_drvinfo_init(ndev);
+
return 0;
err_register:
@@ -365,4 +372,13 @@ static struct pci_driver aq_pci_ops = {
.shutdown = aq_pci_shutdown,
};
-module_pci_driver(aq_pci_ops);
+int aq_pci_func_register_driver(void)
+{
+ return pci_register_driver(&aq_pci_ops);
+}
+
+void aq_pci_func_unregister_driver(void)
+{
+ pci_unregister_driver(&aq_pci_ops);
+}
+
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
index aeee67bf69fa..670f9a940d65 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -24,9 +24,12 @@ struct aq_board_revision_s {
int aq_pci_func_init(struct pci_dev *pdev);
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
- char *name, void *aq_vec,
- cpumask_t *affinity_mask);
+ char *name, irq_handler_t irq_handler,
+ void *irq_arg, cpumask_t *affinity_mask);
void aq_pci_func_free_irqs(struct aq_nic_s *self);
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self);
+int aq_pci_func_register_driver(void);
+void aq_pci_func_unregister_driver(void);
+
#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index e2ffb159cbe2..350e385528fd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -12,10 +12,89 @@
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
+#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
+{
+ unsigned int len = PAGE_SIZE << rxpage->order;
+
+ dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+
+ /* Drop the ref for being in the ring. */
+ __free_pages(rxpage->page, rxpage->order);
+ rxpage->page = NULL;
+}
+
+static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
+ struct device *dev)
+{
+ struct page *page;
+ dma_addr_t daddr;
+ int ret = -ENOMEM;
+
+ page = dev_alloc_pages(order);
+ if (unlikely(!page))
+ goto err_exit;
+
+ daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, daddr)))
+ goto free_page;
+
+ rxpage->page = page;
+ rxpage->daddr = daddr;
+ rxpage->order = order;
+ rxpage->pg_off = 0;
+
+ return 0;
+
+free_page:
+ __free_pages(page, order);
+
+err_exit:
+ return ret;
+}
+
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
+ int order)
+{
+ int ret;
+
+ if (rxbuf->rxdata.page) {
+ /* A page refcount of one means the ring is the only user and can reuse it */
+ if (page_ref_count(rxbuf->rxdata.page) > 1) {
+ /* Try reuse buffer */
+ rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
+ if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
+ (PAGE_SIZE << order)) {
+ self->stats.rx.pg_flips++;
+ } else {
+ /* Buffer exhausted. We have other users and
+ * should release this page and realloc
+ */
+ aq_free_rxpage(&rxbuf->rxdata,
+ aq_nic_get_dev(self->aq_nic));
+ self->stats.rx.pg_losts++;
+ }
+ } else {
+ rxbuf->rxdata.pg_off = 0;
+ self->stats.rx.pg_reuses++;
+ }
+ }
+
+ if (!rxbuf->rxdata.page) {
+ ret = aq_get_rxpage(&rxbuf->rxdata, order,
+ aq_nic_get_dev(self->aq_nic));
+ return ret;
+ }
+
+ return 0;
+}
+
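The reuse logic above keys off the page reference count: if the ring is the only holder (refcount of one), pg_off is rewound and the whole page reused (pg_reuses); otherwise the ring advances pg_off by one frame (pg_flips) and only releases and reallocates once the page is exhausted (pg_losts). With the values set in aq_cfg.h earlier in this series (AQ_CFG_RX_FRAME_MAX of 2 KiB) and 4 KiB order-0 pages, that is two frames per page, i.e. a single flip before the page must be reused or dropped. A small helper expressing that arithmetic (illustrative, not part of the patch):

static inline unsigned int example_frames_per_rxpage(unsigned int order)
{
	/* e.g. order 0 with 4 KiB pages: 4096 / 2048 = 2 frames per page */
	return (PAGE_SIZE << order) / AQ_CFG_RX_FRAME_MAX;
}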
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic)
{
@@ -81,6 +160,11 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+ self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+ (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -201,22 +285,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- int err = 0;
bool is_rsc_completed = true;
+ int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
unsigned int i = 0U;
- struct aq_ring_buff_s *buff_ = NULL;
+ u16 hdr_len;
- if (buff->is_error) {
- __free_pages(buff->page, 0);
+ if (buff->is_error)
continue;
- }
if (buff->is_cleaned)
continue;
@@ -246,45 +329,67 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
}
+ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+
/* for single fragment packets use build_skb() */
if (buff->is_eop &&
buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
- skb = build_skb(page_address(buff->page),
+ skb = build_skb(aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
}
-
skb_put(skb, buff->len);
+ page_ref_inc(buff->rxdata.page);
} else {
- skb = netdev_alloc_skb(ndev, ETH_HLEN);
+ skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
}
- skb_put(skb, ETH_HLEN);
- memcpy(skb->data, page_address(buff->page), ETH_HLEN);
- skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
- buff->len - ETH_HLEN,
- SKB_TRUESIZE(buff->len - ETH_HLEN));
+ hdr_len = buff->len;
+ if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+ hdr_len = eth_get_headlen(skb->dev,
+ aq_buf_vaddr(&buff->rxdata),
+ AQ_CFG_RX_HDR_SIZE);
+
+ memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+ skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff->rxdata.page);
+ }
if (!buff->is_eop) {
- for (i = 1U, next_ = buff->next,
- buff_ = &self->buff_ring[next_];
- true; next_ = buff_->next,
- buff_ = &self->buff_ring[next_], ++i) {
- skb_add_rx_frag(skb, i,
- buff_->page, 0,
+ buff_ = buff;
+ i = 1U;
+ do {
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+
+ dma_sync_single_range_for_cpu(
+ aq_nic_get_dev(self->aq_nic),
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
buff_->len,
- SKB_TRUESIZE(buff->len -
- ETH_HLEN));
+ DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, i++,
+ buff_->rxdata.page,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
-
- if (buff_->is_eop)
- break;
- }
+ } while (!buff_->is_eop);
}
}
@@ -310,12 +415,15 @@ err_exit:
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+ unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
+ if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
+ self->size / 2))
+ return err;
+
for (i = aq_ring_avail_dx(self); i--;
self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
buff = &self->buff_ring[self->sw_tail];
@@ -323,30 +431,15 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX;
- buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
- if (!buff->page) {
- err = -ENOMEM;
+ err = aq_get_rxpages(self, buff, page_order);
+ if (err)
goto err_exit;
- }
-
- buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
- buff->page, 0,
- AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
- if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
- err = -ENOMEM;
- goto err_exit;
- }
+ buff->pa = aq_buf_daddr(&buff->rxdata);
buff = NULL;
}
err_exit:
- if (err < 0) {
- if (buff && buff->page)
- __free_pages(buff->page, 0);
- }
-
return err;
}
@@ -359,10 +452,7 @@ void aq_ring_rx_deinit(struct aq_ring_s *self)
self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
- dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
- AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
- __free_pages(buff->page, 0);
+ aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
}
err_exit:;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index ac1329f4051d..cfffc301e746 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -17,6 +17,13 @@
struct page;
struct aq_nic_cfg_s;
+struct aq_rxpage {
+ struct page *page;
+ dma_addr_t daddr;
+ unsigned int order;
+ unsigned int pg_off;
+};
+
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
* 8bytes|len l3,l4 | pa | pa | pa
@@ -31,28 +38,21 @@ struct aq_nic_cfg_s;
*/
struct __packed aq_ring_buff_s {
union {
+ /* RX/TX */
+ dma_addr_t pa;
/* RX */
struct {
u32 rss_hash;
u16 next;
u8 is_hash_l4;
u8 rsvd1;
- struct page *page;
+ struct aq_rxpage rxdata;
};
/* EOP */
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
};
- /* DX */
- struct {
- dma_addr_t pa;
- };
- /* SOP */
- struct {
- dma_addr_t pa_sop;
- u32 len_pkt_sop;
- };
/* TxC */
struct {
u32 mss;
@@ -91,6 +91,9 @@ struct aq_ring_stats_rx_s {
u64 bytes;
u64 lro_packets;
u64 jumbo_packets;
+ u64 pg_losts;
+ u64 pg_flips;
+ u64 pg_reuses;
};
struct aq_ring_stats_tx_s {
@@ -116,6 +119,7 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for fater math */
+ unsigned int page_order;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
};
@@ -126,6 +130,16 @@ struct aq_ring_param_s {
cpumask_t affinity_mask;
};
+static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
+{
+ return page_to_virt(rxpage->page) + rxpage->pg_off;
+}
+
+static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
+{
+ return rxpage->daddr + rxpage->pg_off;
+}
+
static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
unsigned int dx)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d335c334fa56..a2e4ca1782ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -353,6 +353,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->errors += rx->errors;
stats_rx->jumbo_packets += rx->jumbo_packets;
stats_rx->lro_packets += rx->lro_packets;
+ stats_rx->pg_losts += rx->pg_losts;
+ stats_rx->pg_flips += rx->pg_flips;
+ stats_rx->pg_reuses += rx->pg_reuses;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f6f8338153a2..9fe507fe2d7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -350,10 +350,10 @@ err_exit:
static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
- { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
- { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
- { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
- { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
int err = 0;
@@ -619,8 +619,6 @@ err_exit:
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
@@ -687,8 +685,6 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
is_err &= ~0x18U;
is_err &= ~0x04U;
- dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */
buff->is_error = 1U;
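Both IGCR tables are switched to designated initializers keyed by the AQ_HW_IRQ_* enum, so the entries no longer depend on the enum's declaration order. A small sketch of the idiom, with an illustrative enum rather than the driver's:

#include <stdio.h>

enum irq_mode { IRQ_INVALID, IRQ_LEGACY, IRQ_MSI, IRQ_MSIX, IRQ_MODE_MAX };

static const unsigned int igcr_sketch[IRQ_MODE_MAX][2] = {
    [IRQ_INVALID] = { 0x20000000U, 0x20000000U },
    [IRQ_LEGACY]  = { 0x20000080U, 0x20000080U },
    [IRQ_MSI]     = { 0x20000021U, 0x20000025U },
    [IRQ_MSIX]    = { 0x20000022U, 0x20000026U },
};

int main(void)
{
    printf("MSI-X value: 0x%08x\n", igcr_sketch[IRQ_MSIX][0]);
    return 0;
}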
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b31dba1b1a55..bfcda12d73de 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -259,7 +259,13 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
hw_atl_rpo_lro_inactive_interval_set(self, 0);
- hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
+ /* The LRO time base divider is 5 us (0x61a), which is multiplied
+ * by 50 (0x32) to give the default maximum coalescing interval
+ * of 250 us.
+ */
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);
+
hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
@@ -273,6 +279,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_rpo_lro_en_set(self,
aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_itr_rsc_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+
+ hw_atl_itr_rsc_delay_set(self, 1U);
}
return aq_hw_err_from_flags(self);
}
@@ -378,10 +388,10 @@ err_exit:
static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
- { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
- { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
- { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
- { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
+ [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
+ [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
int err = 0;
@@ -433,6 +443,11 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+ /* Enable link interrupt */
+ if (aq_nic_cfg->link_irq_vec)
+ hw_atl_reg_gen_irq_map_set(self, BIT(7) |
+ aq_nic_cfg->link_irq_vec, 3U);
+
hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
err_exit:
@@ -654,8 +669,6 @@ err_exit:
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
@@ -697,8 +710,6 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_cso_err = 0U;
}
- dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */
buff->is_error = 1U;
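The new comment spells out the arithmetic behind the coalescing value: a 5 us time base multiplied by 50 gives 250 us. A trivial sketch that just re-checks that product:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    const unsigned int timebase_us = 5;    /* divider 0x61a corresponds to a 5 us tick */
    const unsigned int multiplier  = 50;   /* 0x32 */

    assert(timebase_us * multiplier == 250);
    printf("max LRO coalescing interval: %u us\n", timebase_us * multiplier);
    return 0;
}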
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 2cc8dacfdc27..b1c0b6850e60 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -32,9 +32,6 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
-#define hw_atl_b0_caps_aqc111e hw_atl_b0_caps_aqc108
-#define hw_atl_b0_caps_aqc112e hw_atl_b0_caps_aqc109
-
extern const struct aq_hw_ops hw_atl_ops_b0;
#define hw_atl_ops_b1 hw_atl_ops_b0
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index b318eefd36ae..ea98a08d7820 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -78,7 +78,7 @@
#define HW_ATL_B0_TC_MAX 1U
#define HW_ATL_B0_RSS_MAX 8U
-#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_LRO_RXD_MAX 16U
#define HW_ATL_B0_RS_SLIP_ENABLED 0U
/* (256k - 1 (max pay_len) - 54 (header)) */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 0722b8e01964..eaab25cd08b3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -49,11 +49,6 @@ u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw)
HW_ATL_GLB_SOFT_RES_SHIFT);
}
-u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_RX_DMA_STAT_COUNTER7_ADR);
-}
-
u32 hw_atl_reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
{
return aq_hw_read_reg(aq_hw, HW_ATL_GLB_MIF_ID_ADR);
@@ -65,44 +60,24 @@ u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
return aq_hw_read_reg(aq_hw, HW_ATL_RPB_RX_DMA_DROP_PKT_CNT_ADR);
}
-u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
-}
-
-u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERLSW);
}
-u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERLSW);
}
-u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw)
{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_OCTET_COUNTERMSW);
-}
-
-u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_RX_DMA_GOOD_PKT_COUNTERMSW);
-}
-
-u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_OCTET_COUNTERMSW);
-}
-
-u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
-{
- return aq_hw_read_reg(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERMSW);
+ return aq_hw_read_reg64(aq_hw, HW_ATL_STATS_TX_DMA_GOOD_PKT_COUNTERLSW);
}
/* interrupt */
@@ -315,6 +290,21 @@ void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
HW_ATL_ITR_RES_SHIFT, res_irq);
}
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_RSC_EN_ADR, enable);
+}
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR,
+ HW_ATL_ITR_RSC_DELAY_MSK,
+ HW_ATL_ITR_RSC_DELAY_SHIFT,
+ delay);
+}
+
/* rdm */
void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
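The LSW/MSW getters collapse into single u64 getters built on aq_hw_read_reg64(); presumably that helper folds the low and high dword reads into one value. A sketch of combining the two halves (the helper's real behavior is an assumption, and rollover between the two reads is ignored here):

#include <stdint.h>
#include <stdio.h>

static uint64_t combine_counter(uint32_t lsw, uint32_t msw)
{
    return ((uint64_t)msw << 32) | lsw;
}

int main(void)
{
    printf("0x%016llx\n",
           (unsigned long long)combine_counter(0xdeadbeefU, 0x00000001U));
    return 0;
}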
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index d46351890b16..2eb44e1cff70 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -40,29 +40,17 @@ u32 hw_atl_glb_soft_res_get(struct aq_hw_s *aq_hw);
u32 hw_atl_rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
-/* get rx dma good octet counter lsw */
-u32 hw_atl_stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+/* get rx dma good octet counter */
+u64 hw_atl_stats_rx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
-/* get rx dma good packet counter lsw */
-u32 hw_atl_stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+/* get rx dma good packet counter */
+u64 hw_atl_stats_rx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
-/* get tx dma good octet counter lsw */
-u32 hw_atl_stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+/* get tx dma good octet counter */
+u64 hw_atl_stats_tx_dma_good_octet_counter_get(struct aq_hw_s *aq_hw);
-/* get tx dma good packet counter lsw */
-u32 hw_atl_stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
-
-/* get rx dma good octet counter msw */
-u32 hw_atl_stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
-
-/* get rx dma good packet counter msw */
-u32 hw_atl_stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
-
-/* get tx dma good octet counter msw */
-u32 hw_atl_stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
-
-/* get tx dma good packet counter msw */
-u32 hw_atl_stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+/* get tx dma good packet counter */
+u64 hw_atl_stats_tx_dma_good_pkt_counter_get(struct aq_hw_s *aq_hw);
/* get msm rx errors counter register */
u32 hw_atl_reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
@@ -82,9 +70,6 @@ u32 hw_atl_reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
/* get msm rx unicast octets counter register 0 */
u32 hw_atl_reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
-/* get rx dma statistics counter 7 */
-u32 hw_atl_reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
-
/* get msm tx errors counter register */
u32 hw_atl_reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
@@ -152,6 +137,12 @@ u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay);
+
/* rdm */
/* set cpu id */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index fb45bc2d99cf..b64140924a02 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -58,9 +58,6 @@
/* preprocessor definitions for msm rx unicast octets counter register 0 */
#define HW_ATL_MAC_MSM_RX_UCST_OCTETS_COUNTER0_ADR 0x000001b8u
-/* preprocessor definitions for rx dma statistics counter 7 */
-#define HW_ATL_RX_DMA_STAT_COUNTER7_ADR 0x00006818u
-
/* preprocessor definitions for msm tx unicast frames counter register */
#define HW_ATL_MAC_MSM_TX_UCST_FRM_CNT_ADR 0x00000108u
@@ -95,6 +92,19 @@
#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
#define HW_ATL_ITR_RES_SHIFT 31
+
+/* register address for bitfield rsc_en */
+#define HW_ATL_ITR_RSC_EN_ADR 0x00002200
+
+/* register address for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_ADR 0x00002204
+/* bitmask for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_MSK 0x0000000f
+/* width of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_WIDTH 4
+/* lower bit position of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_SHIFT 0
+
/* register address for bitfield dca{d}_cpuid[7:0] */
#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index eb4b99d56081..1208f7ecdd76 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -545,7 +545,7 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
pmbox->stats.dpc = atomic_read(&self->dpc);
} else {
- pmbox->stats.dpc = hw_atl_reg_rx_dma_stat_counter7get(self);
+ pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
}
err_exit:;
@@ -763,6 +763,7 @@ static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
struct hw_atl_utils_mbox mbox;
+ struct aq_stats_s *cs = &self->curr_stats;
hw_atl_utils_mpi_read_stats(self, &mbox);
@@ -789,10 +790,11 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self)
AQ_SDELTA(dpc);
}
#undef AQ_SDELTA
- self->curr_stats.dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counterlsw_get(self);
- self->curr_stats.dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counterlsw_get(self);
- self->curr_stats.dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counterlsw_get(self);
- self->curr_stats.dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counterlsw_get(self);
+
+ cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
+ cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
+ cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
+ cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);
memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));
@@ -960,6 +962,7 @@ const struct aq_fw_ops aq_fw_1x_ops = {
.set_state = hw_atl_utils_mpi_set_state,
.update_link_status = hw_atl_utils_mpi_get_link_status,
.update_stats = hw_atl_utils_update_stats,
+ .get_phy_temp = NULL,
.set_power = aq_fw1x_set_power,
.set_eee_rate = NULL,
.get_eee_rate = NULL,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index fe6c5658e016..fbc9d6ac841f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -38,6 +38,7 @@
#define HW_ATL_FW2X_CTRL_WOL BIT(CTRL_WOL)
#define HW_ATL_FW2X_CTRL_LINK_DROP BIT(CTRL_LINK_DROP)
#define HW_ATL_FW2X_CTRL_PAUSE BIT(CTRL_PAUSE)
+#define HW_ATL_FW2X_CTRL_TEMPERATURE BIT(CTRL_TEMPERATURE)
#define HW_ATL_FW2X_CTRL_ASYMMETRIC_PAUSE BIT(CTRL_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CTRL_FORCE_RECONNECT BIT(CTRL_FORCE_RECONNECT)
@@ -310,6 +311,40 @@ static int aq_fw2x_update_stats(struct aq_hw_s *self)
return hw_atl_utils_update_stats(self);
}
+static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
+{
+ u32 mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ u32 temp_val = mpi_opts & HW_ATL_FW2X_CTRL_TEMPERATURE;
+ u32 phy_temp_offset;
+ u32 temp_res;
+ int err = 0;
+ u32 val;
+
+ phy_temp_offset = self->mbox_addr +
+ offsetof(struct hw_atl_utils_mbox, info) +
+ offsetof(struct hw_aq_info, phy_temperature);
+ /* Toggle the CTRL_TEMPERATURE bit at 0x36C.18 to request an update from FW */
+ mpi_opts = mpi_opts ^ HW_ATL_FW2X_CTRL_TEMPERATURE;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+ /* Wait for FW to report back */
+ err = readx_poll_timeout_atomic(aq_fw2x_state2_get, self, val,
+ temp_val !=
+ (val & HW_ATL_FW2X_CTRL_TEMPERATURE),
+ 1U, 10000U);
+ err = hw_atl_utils_fw_downld_dwords(self, phy_temp_offset,
+ &temp_res, 1);
+
+ if (err)
+ return err;
+
+ /* Convert PHY temperature from 1/256 degree Celsius
+ * to 1/1000 degree Celsius.
+ */
+ *temp = temp_res * 1000 / 256;
+
+ return 0;
+}
+
static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
@@ -509,6 +544,7 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.set_state = aq_fw2x_set_state,
.update_link_status = aq_fw2x_update_link_status,
.update_stats = aq_fw2x_update_stats,
+ .get_phy_temp = aq_fw2x_get_phy_temp,
.set_power = aq_fw2x_set_power,
.set_eee_rate = aq_fw2x_set_eee_rate,
.get_eee_rate = aq_fw2x_get_eee_rate,
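aq_fw2x_get_phy_temp() converts the firmware's 1/256 degree Celsius reading into millidegrees with temp_res * 1000 / 256. A worked sketch of that conversion:

#include <stdio.h>

static int phy_temp_to_mdegc(unsigned int raw)
{
    return raw * 1000 / 256;
}

int main(void)
{
    /* 0x2800 = 10240 raw units -> 40 degC -> 40000 mdegC */
    printf("raw 0x2800 -> %d mdegC\n", phy_temp_to_mdegc(0x2800));
    return 0;
}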
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index ff3d68532f5f..7f89ad5c336d 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -960,7 +960,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
/* Get MAC address from device tree */
mac_addr = of_get_mac_address(dev->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
else
eth_hw_addr_random(ndev);
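The probe path now checks of_get_mac_address() with IS_ERR() because the helper returns an ERR_PTR-encoded errno rather than NULL on failure. A simplified userspace imitation of that convention (not the kernel's actual ERR_PTR implementation):

#include <stdio.h>

#define MAX_ERRNO 4095UL

static void *err_ptr(long err) { return (void *)err; }

static int is_err(const void *ptr)
{
    /* error values live in the top MAX_ERRNO bytes of the address space */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    static unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
    void *ok = mac, *bad = err_ptr(-2 /* -ENOENT */);

    printf("ok:  is_err=%d\n", is_err(ok));   /* 0 -> copy the MAC          */
    printf("bad: is_err=%d\n", is_err(bad));  /* 1 -> fall back to random   */
    return 0;
}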
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 6f56276015a4..3c4967eecef1 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -404,6 +404,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int dma_len;
unsigned int align;
unsigned int next;
+ bool xmit_more;
if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
netif_stop_queue(dev);
@@ -423,9 +424,10 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ xmit_more = netdev_xmit_more();
if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
netif_stop_queue(dev);
- skb->xmit_more = 0;
+ xmit_more = false;
}
next = priv->tx_next;
@@ -450,7 +452,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
desc->n_addr = priv->tx_bufs[next].dma_desc;
desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
- if (!skb->xmit_more)
+ if (!xmit_more)
desc->config |= DESC_EOC;
txb->skb = skb;
@@ -468,7 +470,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_next = next;
- if (!skb->xmit_more) {
+ if (!xmit_more) {
smp_wmb();
priv->tx_chain->ready = true;
priv->tx_chain = NULL;
@@ -1461,7 +1463,7 @@ static int nb8800_probe(struct platform_device *pdev)
dev->irq = irq;
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(dev->dev_addr, mac);
if (!is_valid_ether_addr(dev->dev_addr))
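With skb->xmit_more gone, the driver latches netdev_xmit_more() once and rings the hardware only on the last packet of a burst or when it stops the queue. A plain-C model of that batching rule, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

static void ring_doorbell(int queued)
{
    printf("doorbell: %d descriptor(s) handed to HW\n", queued);
}

static void queue_pkt(int *queued, bool xmit_more, bool queue_stopped)
{
    (*queued)++;
    if (!xmit_more || queue_stopped) {
        ring_doorbell(*queued);
        *queued = 0;
    }
}

int main(void)
{
    int queued = 0;

    queue_pkt(&queued, true,  false);  /* batched       */
    queue_pkt(&queued, true,  false);  /* batched       */
    queue_pkt(&queued, false, false);  /* end of burst  */
    return 0;
}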
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 716bfbba59cf..461b2c0b2ed6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -196,6 +196,7 @@ config BNXT
depends on PCI
select FW_LOADER
select LIBCRC32C
+ select NET_DEVLINK
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index bc3ac369cbe3..c623896e3ccb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -116,15 +116,6 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}
-static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
- struct dma_desc *desc,
- unsigned int port)
-{
- /* Ports are latched, so write upper address first */
- tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
- tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
-}
-
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
@@ -1291,11 +1282,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
struct bcm_sysport_tx_ring *ring;
struct bcm_sysport_cb *cb;
struct netdev_queue *txq;
- struct dma_desc *desc;
+ u32 len_status, addr_lo;
unsigned int skb_len;
unsigned long flags;
dma_addr_t mapping;
- u32 len_status;
u16 queue;
int ret;
@@ -1338,10 +1328,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
dma_unmap_addr_set(cb, dma_addr, mapping);
dma_unmap_len_set(cb, dma_len, skb_len);
- /* Fetch a descriptor entry from our pool */
- desc = ring->desc_cpu;
-
- desc->addr_lo = lower_32_bits(mapping);
+ addr_lo = lower_32_bits(mapping);
len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
len_status |= (skb_len << DESC_LEN_SHIFT);
len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
@@ -1354,16 +1341,9 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
ring->curr_desc = 0;
ring->desc_count--;
- /* Ensure write completion of the descriptor status/length
- * in DRAM before the System Port WRITE_PORT register latches
- * the value
- */
- wmb();
- desc->addr_status_len = len_status;
- wmb();
-
- /* Write this descriptor address to the RING write port */
- tdma_port_write_desc_addr(priv, desc, ring->index);
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
+ tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
/* Check ring space and update SW control flow */
if (ring->desc_count == 0)
@@ -1489,28 +1469,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
unsigned int index)
{
struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
- struct device *kdev = &priv->pdev->dev;
size_t size;
- void *p;
u32 reg;
/* Simple descriptors partitioning for now */
size = 256;
- /* We just need one DMA descriptor which is DMA-able, since writing to
- * the port will allocate a new descriptor in its internal linked-list
- */
- p = dma_alloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
- GFP_KERNEL);
- if (!p) {
- netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
- return -ENOMEM;
- }
-
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
if (!ring->cbs) {
- dma_free_coherent(kdev, sizeof(struct dma_desc),
- ring->desc_cpu, ring->desc_dma);
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
return -ENOMEM;
}
@@ -1523,7 +1489,6 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
ring->size = size;
ring->clean_index = 0;
ring->alloc_size = ring->size;
- ring->desc_cpu = p;
ring->desc_count = ring->size;
ring->curr_desc = 0;
@@ -1578,8 +1543,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
napi_enable(&ring->napi);
netif_dbg(priv, hw, priv->netdev,
- "TDMA cfg, size=%d, desc_cpu=%p switch q=%d,port=%d\n",
- ring->size, ring->desc_cpu, ring->switch_queue,
+ "TDMA cfg, size=%d, switch q=%d,port=%d\n",
+ ring->size, ring->switch_queue,
ring->switch_port);
return 0;
@@ -1589,7 +1554,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
unsigned int index)
{
struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
- struct device *kdev = &priv->pdev->dev;
u32 reg;
/* Caller should stop the TDMA engine */
@@ -1611,12 +1575,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
kfree(ring->cbs);
ring->cbs = NULL;
-
- if (ring->desc_dma) {
- dma_free_coherent(kdev, sizeof(struct dma_desc),
- ring->desc_cpu, ring->desc_dma);
- ring->desc_dma = 0;
- }
ring->size = 0;
ring->alloc_size = 0;
@@ -2274,8 +2232,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u16 queue = skb_get_queue_mapping(skb);
@@ -2283,7 +2240,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2291,7 +2248,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
return tx_ring->index;
}
@@ -2548,7 +2505,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize netdevice members */
macaddr = of_get_mac_address(dn);
- if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ if (IS_ERR(macaddr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
} else {
@@ -2599,11 +2556,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
- "Broadcom SYSTEMPORT%s" REV_FMT
- " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ "Broadcom SYSTEMPORT%s " REV_FMT
+ " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
priv->is_lite ? " Lite" : "",
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
- priv->base, priv->irq0, priv->irq1, txq, rxq);
+ priv->irq0, priv->irq1, txq, rxq);
return 0;
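The SYSTEMPORT TX path now writes the descriptor words straight into the latched WRITE_PORT registers, HI first, so the LO write commits the pair and the coherent DMA descriptor can be dropped. A toy register model of that ordering (addresses and latching behavior are illustrative only):

#include <stdio.h>

static unsigned int latched_hi, committed_hi, committed_lo;

static void write_port_hi(unsigned int v) { latched_hi = v; }

static void write_port_lo(unsigned int v)
{
    committed_hi = latched_hi;   /* the LO write latches the pair */
    committed_lo = v;
}

int main(void)
{
    write_port_hi(0x00c0ffeeU);  /* len/status word: flags, length, upper address bits */
    write_port_lo(0x1000beefU);  /* lower address bits: commits the descriptor */
    printf("committed: hi=0x%08x lo=0x%08x\n", committed_hi, committed_lo);
    return 0;
}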
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 0b192fea9c5d..6f3141c86436 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -516,12 +516,6 @@ struct bcm_rsb {
#define TDMA_DEBUG 0x64c
-/* Transmit/Receive descriptor */
-struct dma_desc {
- u32 addr_status_len;
- u32 addr_lo;
-};
-
/* Number of Receive hardware descriptor words */
#define SP_NUM_HW_RX_DESC_WORDS 1024
#define SP_LT_NUM_HW_RX_DESC_WORDS 256
@@ -530,7 +524,7 @@ struct dma_desc {
#define SP_NUM_TX_DESC 1536
#define SP_LT_NUM_TX_DESC 256
-#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+#define WORDS_PER_DESC 2
/* Rx/Tx common counter group.*/
struct bcm_sysport_pkt_counters {
@@ -718,7 +712,6 @@ struct bcm_sysport_net_dim {
struct bcm_sysport_tx_ring {
spinlock_t lock; /* Ring lock for tx reclaim/xmit */
struct napi_struct napi; /* NAPI per tx queue */
- dma_addr_t desc_dma; /* DMA cookie */
unsigned int index; /* Ring index */
unsigned int size; /* Ring current size */
unsigned int alloc_size; /* Ring one-time allocated size */
@@ -727,7 +720,6 @@ struct bcm_sysport_tx_ring {
unsigned int c_index; /* Last consumer index */
unsigned int clean_index; /* Current clean index */
struct bcm_sysport_cb *cbs; /* Transmit control blocks */
- struct dma_desc *desc_cpu; /* CPU view of the descriptor */
struct bcm_sysport_priv *priv; /* private context backpointer */
unsigned long packets; /* packets statistics */
unsigned long bytes; /* bytes statistics */
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 6fe074c1588b..34d18302b1a3 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -132,7 +132,7 @@ static int bgmac_probe(struct bcma_device *core)
mac = of_get_mac_address(bgmac->dev->of_node);
/* If no MAC address assigned via device tree, check SPROM */
- if (!mac) {
+ if (IS_ERR_OR_NULL(mac)) {
switch (core->core_unit) {
case 0:
mac = sprom->et0mac;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 894eda5b13cf..6dc0dd91ad11 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -193,7 +193,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dma_dev = &pdev->dev;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(bgmac->net_dev->dev_addr, mac_addr);
else
dev_warn(&pdev->dev, "MAC address not present in device tree\n");
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 0c8f5b546c6f..008ad0ca89ba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb, NULL) %
+ return netdev_pick_tx(dev, skb, NULL) %
(BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 2d57af9c061c..c2f6e44e9a3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index d9057c8bbeef..78326a6c0aba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 1
+#define BCM_5710_FW_REVISION_VERSION 11
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3639,8 +3639,10 @@ struct client_init_rx_data {
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header {
*/
struct eth_classify_header {
u8 rule_cnt;
- u8 reserved0;
+ u8 warning_on_error;
__le16 reserved1;
__le32 echo;
};
@@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data {
__le32 sge_page_base_hi;
__le16 sge_pause_thr_low;
__le16 sge_pause_thr_high;
+ u8 tpa_over_vlan_disable;
+ u8 reserved[7];
};
@@ -4946,7 +4950,7 @@ struct fairness_vars_per_port {
u32 upper_bound;
u32 fair_threshold;
u32 fairness_timeout;
- u32 reserved0;
+ u32 size_thr;
};
/*
@@ -5415,7 +5419,9 @@ struct function_start_data {
u8 sd_vlan_force_pri_val;
u8 c2s_pri_tt_valid;
u8 c2s_pri_default;
- u8 reserved2[6];
+ u8 tx_vlan_filtering_enable;
+ u8 tx_vlan_filtering_use_pvid;
+ u8 reserved2[4];
struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
@@ -5448,7 +5454,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
- __le16 reserved0;
+ u8 tx_vlan_filtering_pvid_change_flg;
+ u8 reserved0;
__le32 reserved2;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3716c828ff5d..03ac10b1cd1e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -15347,27 +15347,47 @@ static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
return 0;
}
+#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
+#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
+#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
+#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
+#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
+#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
+#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
+#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
+#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
+#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
int bnx2x_configure_ptp_filters(struct bnx2x *bp)
{
int port = BP_PORT(bp);
+ u32 param, rule;
int rc;
if (!bp->hwtstamp_ioctl_called)
return 0;
+ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+ NIG_REG_P0_TLLH_PTP_RULE_MASK;
switch (bp->tx_type) {
case HWTSTAMP_TX_ON:
bp->flags |= TX_TIMESTAMPING_EN;
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
BNX2X_ERR("One-step timestamping is not supported\n");
return -ERANGE;
}
+ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+ NIG_REG_P0_LLH_PTP_PARAM_MASK;
+ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+ NIG_REG_P0_LLH_PTP_RULE_MASK;
switch (bp->rx_filter) {
case HWTSTAMP_FILTER_NONE:
break;
@@ -15381,30 +15401,24 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
+ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
/* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
/* Initialize PTP detection L2 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
+ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -15412,10 +15426,8 @@ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
/* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
+ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
+ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
break;
}
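bnx2x_configure_ptp_filters() now resolves the per-port PARAM/RULE register addresses once and lets each HWTSTAMP case supply only the mask values. A sketch of that shape, with placeholder addresses rather than the real NIG offsets:

#include <stdio.h>

static void reg_wr(unsigned int addr, unsigned int val)
{
    printf("REG_WR(0x%05x, 0x%04x)\n", addr, val);
}

static void config_rx_ptp_filter(int port, unsigned int param_mask,
                                 unsigned int rule_mask)
{
    /* placeholder addresses, not the real NIG register offsets */
    unsigned int param = port ? 0x18a00U : 0x18900U;
    unsigned int rule  = port ? 0x18a04U : 0x18904U;

    reg_wr(param, param_mask);
    reg_wr(rule, rule_mask);
}

int main(void)
{
    config_rx_ptp_filter(0, 0x7EE, 0x3FFE);  /* e.g. PTP v1 over UDP/IPv4 */
    return 0;
}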
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2a4341708c0f..8314c00d7537 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -114,6 +114,7 @@ enum board_idx {
BCM5745x_NPAR,
BCM57508,
BCM57504,
+ BCM57502,
BCM58802,
BCM58804,
BCM58808,
@@ -158,6 +159,7 @@ static const struct {
[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
[BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
+ [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
@@ -205,6 +207,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
{ PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
+ { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
@@ -216,6 +219,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
+ { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
@@ -551,13 +555,13 @@ normal_tx:
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (skb->xmit_more && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq);
@@ -897,7 +901,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
DMA_ATTR_WEAK_ORDERING);
if (unlikely(!payload))
- payload = eth_get_headlen(data_ptr, len);
+ payload = eth_get_headlen(bp->dev, data_ptr, len);
skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
if (!skb) {
@@ -3393,6 +3397,12 @@ static void bnxt_free_port_stats(struct bnxt *bp)
bp->hw_rx_port_stats_ext_map);
bp->hw_rx_port_stats_ext = NULL;
}
+
+ if (bp->hw_pcie_stats) {
+ dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+ bp->hw_pcie_stats, bp->hw_pcie_stats_map);
+ bp->hw_pcie_stats = NULL;
+ }
}
static void bnxt_free_ring_stats(struct bnxt *bp)
@@ -3437,56 +3447,68 @@ static int bnxt_alloc_stats(struct bnxt *bp)
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
- if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
- if (bp->hw_rx_port_stats)
- goto alloc_ext_stats;
+ if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
+ return 0;
- bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
- sizeof(struct tx_port_stats) + 1024;
+ if (bp->hw_rx_port_stats)
+ goto alloc_ext_stats;
- bp->hw_rx_port_stats =
- dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
- &bp->hw_rx_port_stats_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats)
- return -ENOMEM;
+ bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
+ sizeof(struct tx_port_stats) + 1024;
- bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
- 512;
- bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
- sizeof(struct rx_port_stats) + 512;
- bp->flags |= BNXT_FLAG_PORT_STATS;
+ bp->hw_rx_port_stats =
+ dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
+ &bp->hw_rx_port_stats_map,
+ GFP_KERNEL);
+ if (!bp->hw_rx_port_stats)
+ return -ENOMEM;
+
+ bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
+ bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats) + 512;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
alloc_ext_stats:
- /* Display extended statistics only if FW supports it */
- if (bp->hwrm_spec_code < 0x10804 ||
- bp->hwrm_spec_code == 0x10900)
+ /* Display extended statistics only if FW supports it */
+ if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
+ if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
return 0;
- if (bp->hw_rx_port_stats_ext)
- goto alloc_tx_ext_stats;
+ if (bp->hw_rx_port_stats_ext)
+ goto alloc_tx_ext_stats;
- bp->hw_rx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct rx_port_stats_ext),
- &bp->hw_rx_port_stats_ext_map,
- GFP_KERNEL);
- if (!bp->hw_rx_port_stats_ext)
- return 0;
+ bp->hw_rx_port_stats_ext =
+ dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+ &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
+ if (!bp->hw_rx_port_stats_ext)
+ return 0;
alloc_tx_ext_stats:
- if (bp->hw_tx_port_stats_ext)
- return 0;
+ if (bp->hw_tx_port_stats_ext)
+ goto alloc_pcie_stats;
- if (bp->hwrm_spec_code >= 0x10902) {
- bp->hw_tx_port_stats_ext =
- dma_alloc_coherent(&pdev->dev,
- sizeof(struct tx_port_stats_ext),
- &bp->hw_tx_port_stats_ext_map,
- GFP_KERNEL);
- }
- bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
+ if (bp->hwrm_spec_code >= 0x10902 ||
+ (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
+ bp->hw_tx_port_stats_ext =
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_port_stats_ext),
+ &bp->hw_tx_port_stats_ext_map,
+ GFP_KERNEL);
}
+ bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
+
+alloc_pcie_stats:
+ if (bp->hw_pcie_stats ||
+ !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
+ return 0;
+
+ bp->hw_pcie_stats =
+ dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
+ &bp->hw_pcie_stats_map, GFP_KERNEL);
+ if (!bp->hw_pcie_stats)
+ return 0;
+
+ bp->flags |= BNXT_FLAG_PCIE_STATS;
return 0;
}
@@ -4205,16 +4227,25 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
struct hwrm_cfa_ntuple_filter_alloc_output *resp;
struct flow_keys *keys = &fltr->fkeys;
+ struct bnxt_vnic_info *vnic;
+ u32 dst_ena = 0;
int rc = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
- req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX) {
+ dst_ena = CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX;
+ req.rfs_ring_tbl_idx = cpu_to_le16(fltr->rxq);
+ vnic = &bp->vnic_info[0];
+ } else {
+ vnic = &bp->vnic_info[fltr->rxq + 1];
+ }
+ req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS | dst_ena);
req.ethertype = htons(ETH_P_IP);
memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
@@ -4252,7 +4283,6 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.dst_port = keys->ports.dst;
req.dst_port_mask = cpu_to_be16(0xffff);
- req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
@@ -5500,11 +5530,13 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
stat = bnxt_get_func_stat_ctxs(bp);
if (BNXT_NEW_RM(bp) &&
(hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
- hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
- hw_resc->resv_stat_ctxs != stat ||
+ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
(hw_resc->resv_hw_ring_grps != grp &&
!(bp->flags & BNXT_FLAG_CHIP_P5))))
return true;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
+ hw_resc->resv_irqs != nq)
+ return true;
return false;
}
@@ -6053,6 +6085,8 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_entries_multiple = 1;
ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctx->mrav_num_entries_units =
+ le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
} else {
@@ -6099,6 +6133,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct bnxt_ctx_pg_info *ctx_pg;
__le32 *num_entries;
__le64 *pg_dir;
+ u32 flags = 0;
u8 *pg_attr;
int i, rc;
u32 ena;
@@ -6158,6 +6193,9 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
ctx_pg = &ctx->mrav_mem;
req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+ if (ctx->mrav_num_entries_units)
+ flags |=
+ FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl,
@@ -6184,6 +6222,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
*num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
+ req.flags = cpu_to_le32(flags);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
rc = -EIO;
@@ -6322,6 +6361,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
u32 mem_size, ena, entries;
+ u32 num_mr, num_ah;
u32 extra_srqs = 0;
u32 extra_qps = 0;
u8 pg_lvl = 1;
@@ -6385,12 +6425,21 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
goto skip_rdma;
ctx_pg = &ctx->mrav_mem;
- ctx_pg->entries = extra_qps * 4;
+ /* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = 1024 * 256;
+ num_ah = 1024 * 128;
+ ctx_pg->entries = num_mr + num_ah;
mem_size = ctx->mrav_entry_size * ctx_pg->entries;
rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2);
if (rc)
return rc;
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
+ if (ctx->mrav_num_entries_units)
+ ctx_pg->entries =
+ ((num_mr / ctx->mrav_num_entries_units) << 16) |
+ (num_ah / ctx->mrav_num_entries_units);
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
@@ -6505,6 +6554,10 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->flags |= BNXT_FLAG_ROCEV1_CAP;
if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
bp->tx_push_thresh = 0;
if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
@@ -6577,6 +6630,34 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
return 0;
}
+static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
+{
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
+ struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
+ int rc = 0;
+ u32 flags;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
+ return 0;
+
+ resp = bp->hwrm_cmd_resp_addr;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_cfa_adv_qcaps_exit;
+
+ flags = le32_to_cpu(resp->flags);
+ if (flags &
+ CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX;
+
+hwrm_cfa_adv_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_hwrm_func_reset(struct bnxt *bp)
{
struct hwrm_func_reset_input req = {0};
@@ -6668,6 +6749,15 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+ if (strlen(resp->active_pkg_name)) {
+ int fw_ver_len = strlen(bp->fw_ver_str);
+
+ snprintf(bp->fw_ver_str + fw_ver_len,
+ FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
+ resp->active_pkg_name);
+ bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
+ }
+
bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
if (!bp->hwrm_cmd_timeout)
bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
@@ -6700,6 +6790,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
+ if (dev_caps_cfg &
+ VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6805,6 +6899,19 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
return rc;
}
+static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
+{
+ struct hwrm_pcie_qstats_input req = {0};
+
+ if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
+ req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
+ req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
{
if (bp->vxlan_port_cnt) {
@@ -8652,7 +8759,7 @@ static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
req.port_id = cpu_to_le16(bp->pf.port_id);
req.phy_addr = phy_addr;
req.reg_addr = cpu_to_le16(reg & 0x1f);
- if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+ if (mdio_phy_id_is_c45(phy_addr)) {
req.cl45_mdio = 1;
req.phy_addr = mdio_phy_id_prtad(phy_addr);
req.dev_addr = mdio_phy_id_devad(phy_addr);
@@ -8679,7 +8786,7 @@ static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
req.port_id = cpu_to_le16(bp->pf.port_id);
req.phy_addr = phy_addr;
req.reg_addr = cpu_to_le16(reg & 0x1f);
- if (bp->link_info.support_speeds & BNXT_LINK_SPEED_MSK_10GB) {
+ if (mdio_phy_id_is_c45(phy_addr)) {
req.cl45_mdio = 1;
req.phy_addr = mdio_phy_id_prtad(phy_addr);
req.dev_addr = mdio_phy_id_devad(phy_addr);
@@ -8997,8 +9104,11 @@ static bool bnxt_can_reserve_rings(struct bnxt *bp)
/* If the chip and firmware supports RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
- if (bp->flags & BNXT_FLAG_CHIP_P5)
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX)
+ return true;
return false;
+ }
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
@@ -9013,7 +9123,7 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
int vnics, max_vnics, max_rss_ctxs;
if (bp->flags & BNXT_FLAG_CHIP_P5)
- return false;
+ return bnxt_rfs_supported(bp);
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
return false;
@@ -9395,6 +9505,7 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
bnxt_hwrm_port_qstats(bp);
bnxt_hwrm_port_qstats_ext(bp);
+ bnxt_hwrm_pcie_qstats(bp);
}
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
@@ -10065,23 +10176,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return rc;
}
-static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
- size_t len)
-{
- struct bnxt *bp = netdev_priv(dev);
- int rc;
-
- /* The PF and it's VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
- return -EOPNOTSUPP;
-
- rc = snprintf(buf, len, "p%d", bp->pf.port_id);
-
- if (rc >= len)
- return -EOPNOTSUPP;
- return 0;
-}
-
int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
@@ -10100,6 +10194,13 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return 0;
}
+static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return &bp->dl_port;
+}
+
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -10131,8 +10232,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
- .ndo_get_port_parent_id = bnxt_get_port_parent_id,
- .ndo_get_phys_port_name = bnxt_get_phys_port_name
+ .ndo_get_devlink_port = bnxt_get_devlink_port,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -10456,6 +10556,26 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dw) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -10589,6 +10709,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -1;
goto init_err_pci_clean;
}
+
+ rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
+ rc);
+
rc = bnxt_init_mac_addr(bp);
if (rc) {
dev_err(&pdev->dev, "Unable to initialize mac address.\n");
@@ -10596,6 +10722,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
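bnxt_pcie_dsn_get() above reads the adapter's 8-byte DSN as two little-endian dwords at offsets +4 and +8 from the extended capability. A stand-alone sketch of assembling those bytes (fake config values, no real PCI access):

#include <stdint.h>
#include <stdio.h>

static void put_le32(uint32_t v, uint8_t *p)
{
    p[0] = v & 0xff;
    p[1] = (v >> 8) & 0xff;
    p[2] = (v >> 16) & 0xff;
    p[3] = (v >> 24) & 0xff;
}

int main(void)
{
    uint8_t dsn[8];
    uint32_t dw_lo = 0x44332211U, dw_hi = 0x88776655U; /* fake config-space reads */

    put_le32(dw_lo, &dsn[0]);
    put_le32(dw_hi, &dsn[4]);

    for (int i = 0; i < 8; i++)
        printf("%02x%s", dsn[i], i == 7 ? "\n" : ":");
    return 0;
}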
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index cf81ace7a6e6..eca36dd6b751 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1227,6 +1227,7 @@ struct bnxt_ctx_mem_info {
u16 mrav_entry_size;
u16 tim_entry_size;
u32 tim_max_entries;
+ u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u32 flags;
@@ -1354,6 +1355,7 @@ struct bnxt {
#define BNXT_FLAG_DIM 0x2000000
#define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
#define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+ #define BNXT_FLAG_PCIE_STATS 0x40000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -1480,6 +1482,11 @@ struct bnxt {
#define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080
#define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400
#define BNXT_FW_CAP_TRUSTED_VF 0x00000800
+ #define BNXT_FW_CAP_PKG_VER 0x00004000
+ #define BNXT_FW_CAP_CFA_ADV_FLOW 0x00008000
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX 0x00010000
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED 0x00020000
+ #define BNXT_FW_CAP_EXT_STATS_SUPPORTED 0x00040000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1498,10 +1505,12 @@ struct bnxt {
struct tx_port_stats *hw_tx_port_stats;
struct rx_port_stats_ext *hw_rx_port_stats_ext;
struct tx_port_stats_ext *hw_tx_port_stats_ext;
+ struct pcie_ctx_hw_stats *hw_pcie_stats;
dma_addr_t hw_rx_port_stats_map;
dma_addr_t hw_tx_port_stats_map;
dma_addr_t hw_rx_port_stats_ext_map;
dma_addr_t hw_tx_port_stats_ext_map;
+ dma_addr_t hw_pcie_stats_map;
int hw_port_stats_size;
u16 fw_rx_stats_ext_size;
u16 fw_tx_stats_ext_size;
@@ -1634,6 +1643,9 @@ struct bnxt {
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
+#define BNXT_PCIE_STATS_OFFSET(counter) \
+ (offsetof(struct pcie_ctx_hw_stats, counter) / 8)
+
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFF_DIAG_SUPPORT_OFFSET 0x5c
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index e1feb97bcd81..549c90d3e465 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
@@ -228,6 +229,9 @@ int bnxt_dl_register(struct bnxt *bp)
goto err_dl_unreg;
}
+ devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ bp->pf.port_id, false, 0,
+ bp->switch_id, sizeof(bp->switch_id));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index adabbe94a259..b1263821a6e9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -235,6 +235,9 @@ reset_coalesce:
BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
BNXT_TX_STATS_PRI_ENTRY(counter, 7)
+#define BNXT_PCIE_STATS_ENTRY(counter) \
+ { BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
+
enum {
RX_TOTAL_DISCARDS,
TX_TOTAL_DISCARDS,
@@ -345,6 +348,10 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
BNXT_RX_STATS_EXT_COS_ENTRIES,
BNXT_RX_STATS_EXT_PFC_ENTRIES,
+ BNXT_RX_STATS_EXT_ENTRY(rx_bits),
+ BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
+ BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
+ BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
};
static const struct {
@@ -383,6 +390,24 @@ static const struct {
BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};
+static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+} bnxt_pcie_stats_arr[] = {
+ BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
+ BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
+ BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
+ BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
+ BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
+ BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
+ BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
+ BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
+};
+
#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI \
@@ -390,6 +415,7 @@ static const struct {
ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
+#define BNXT_NUM_PCIE_STATS ARRAY_SIZE(bnxt_pcie_stats_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
@@ -407,6 +433,9 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_STATS_PRI;
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS)
+ num_stats += BNXT_NUM_PCIE_STATS;
+
return num_stats;
}
@@ -509,6 +538,14 @@ skip_ring_stats:
}
}
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS) {
+ __le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;
+
+ for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
+ buf[j] = le64_to_cpu(*(pcie_stats +
+ bnxt_pcie_stats_arr[i].offset));
+ }
+ }
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -609,6 +646,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
}
+ if (bp->flags & BNXT_FLAG_PCIE_STATS) {
+ for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
+ strcpy(buf, bnxt_pcie_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+ }
break;
case ETH_SS_TEST:
if (bp->num_tests)
@@ -3262,7 +3305,8 @@ void bnxt_ethtool_init(struct bnxt *bp)
struct net_device *dev = bp->dev;
int i, rc;
- bnxt_get_pkgver(dev);
+ if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
+ bnxt_get_pkgver(dev);
if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index b6c610339501..12bbb2a207d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -89,7 +89,10 @@ struct hwrm_short_input {
__le16 signature;
#define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
#define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
- __le16 unused_0;
+ __le16 target_id;
+ #define SHORT_REQ_TARGET_ID_DEFAULT 0x0UL
+ #define SHORT_REQ_TARGET_ID_TOOLS 0xfffdUL
+ #define SHORT_REQ_TARGET_ID_LAST SHORT_REQ_TARGET_ID_TOOLS
__le16 size;
__le64 req_addr;
};
@@ -211,6 +214,7 @@ struct cmd_nums {
#define HWRM_FWD_RESP 0xd2UL
#define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
#define HWRM_OEM_CMD 0xd4UL
+ #define HWRM_PORT_PRBS_TEST 0xd5UL
#define HWRM_TEMP_MONITOR_QUERY 0xe0UL
#define HWRM_WOL_FILTER_ALLOC 0xf0UL
#define HWRM_WOL_FILTER_FREE 0xf1UL
@@ -262,6 +266,7 @@ struct cmd_nums {
#define HWRM_CFA_EEM_QCFG 0x122UL
#define HWRM_CFA_EEM_OP 0x123UL
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL
+ #define HWRM_CFA_TFLIB 0x125UL
#define HWRM_ENGINE_CKV_HELLO 0x12dUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
@@ -272,6 +277,7 @@ struct cmd_nums {
#define HWRM_ENGINE_CKV_RNG_GET 0x134UL
#define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
#define HWRM_ENGINE_CKV_KEY_LABEL_CFG 0x136UL
+ #define HWRM_ENGINE_CKV_KEY_LABEL_QCFG 0x137UL
#define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
#define HWRM_ENGINE_QG_QUERY 0x13dUL
#define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
@@ -312,6 +318,11 @@ struct cmd_nums {
#define HWRM_SELFTEST_IRQ 0x202UL
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
#define HWRM_PCIE_QSTATS 0x204UL
+ #define HWRM_MFG_FRU_WRITE_CONTROL 0x205UL
+ #define HWRM_MFG_TIMERS_QUERY 0x206UL
+ #define HWRM_MFG_OTP_CFG 0x207UL
+ #define HWRM_MFG_OTP_QCFG 0x208UL
+ #define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -325,6 +336,8 @@ struct cmd_nums {
#define HWRM_DBG_FW_CLI 0xff1aUL
#define HWRM_DBG_I2C_CMD 0xff1bUL
#define HWRM_DBG_RING_INFO_GET 0xff1cUL
+ #define HWRM_DBG_CRASHDUMP_HEADER 0xff1dUL
+ #define HWRM_DBG_CRASHDUMP_ERASE 0xff1eUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
#define HWRM_NVM_VALIDATE_OPTION 0xffefUL
#define HWRM_NVM_FLUSH 0xfff0UL
@@ -350,23 +363,26 @@ struct cmd_nums {
/* ret_codes (size:64b/8B) */
struct ret_codes {
__le16 error_code;
- #define HWRM_ERR_CODE_SUCCESS 0x0UL
- #define HWRM_ERR_CODE_FAIL 0x1UL
- #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
- #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
- #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
- #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
- #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
- #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
- #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
- #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
- #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
- #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
- #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
- #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
- #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
- #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
- #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ #define HWRM_ERR_CODE_SUCCESS 0x0UL
+ #define HWRM_ERR_CODE_FAIL 0x1UL
+ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
+ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
+ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
+ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
+ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
+ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
+ #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL
+ #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL
+ #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL
+ #define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC 0xcUL
+ #define HWRM_ERR_CODE_KEY_HASH_COLLISION 0xdUL
+ #define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
+ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
+ #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
+ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
+ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
+ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
__le16 unused_0[3];
};
@@ -387,11 +403,15 @@ struct hwrm_err_output {
#define HW_HASH_INDEX_SIZE 0x80
#define HW_HASH_KEY_SIZE 40
#define HWRM_RESP_VALID_KEY 1
+#define HWRM_TARGET_ID_BONO 0xFFF8
+#define HWRM_TARGET_ID_KONG 0xFFF9
+#define HWRM_TARGET_ID_APE 0xFFFA
+#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_RSVD 47
-#define HWRM_VERSION_STR "1.10.0.47"
+#define HWRM_VERSION_RSVD 69
+#define HWRM_VERSION_STR "1.10.0.69"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -442,6 +462,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -449,7 +470,7 @@ struct hwrm_ver_get_output {
char hwrm_fw_name[16];
char mgmt_fw_name[16];
char netctrl_fw_name[16];
- u8 reserved2[16];
+ char active_pkg_name[16];
char roce_fw_name[16];
__le16 chip_num;
u8 chip_rev;
@@ -1047,6 +1068,7 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL
#define FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED 0x1000000UL
u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
@@ -1715,7 +1737,7 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_entry_size;
__le16 tim_entry_size;
__le32 tim_max_entries;
- u8 unused_0[2];
+ __le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 valid;
};
@@ -1728,7 +1750,8 @@ struct hwrm_func_backing_store_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
@@ -2580,7 +2603,7 @@ struct hwrm_port_phy_qcfg_output {
u8 valid;
};
-/* hwrm_port_mac_cfg_input (size:320b/40B) */
+/* hwrm_port_mac_cfg_input (size:384b/48B) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2601,6 +2624,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_ONE_STEP_TX_TS 0x2000UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -2610,6 +2634,7 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
#define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
#define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+ #define PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB 0x200UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -2642,6 +2667,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
#define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
u8 unused_0[3];
+ __s32 ptp_freq_adj_ppb;
+ u8 unused_1[4];
};
/* hwrm_port_mac_cfg_output (size:128b/16B) */
@@ -2680,8 +2707,9 @@ struct hwrm_port_mac_ptp_qcfg_output {
__le16 seq_id;
__le16 resp_len;
u8 flags;
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_ONE_STEP_TX_TS 0x4UL
u8 unused_0[3];
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
@@ -2888,7 +2916,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:2368b/296B) */
+/* rx_port_stats_ext (size:2624b/328B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -2927,6 +2955,10 @@ struct rx_port_stats_ext {
__le64 pfc_pri6_rx_transitions;
__le64 pfc_pri7_rx_duration_us;
__le64 pfc_pri7_rx_transitions;
+ __le64 rx_bits;
+ __le64 rx_buffer_passed_threshold;
+ __le64 rx_pcs_symbol_err;
+ __le64 rx_corrected_bits;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -3029,6 +3061,35 @@ struct hwrm_port_lpbk_clr_stats_output {
u8 valid;
};
+/* hwrm_port_ts_query_input (size:192b/24B) */
+struct hwrm_port_ts_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_TX 0x0UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_RX 0x1UL
+ #define PORT_TS_QUERY_REQ_FLAGS_PATH_LAST PORT_TS_QUERY_REQ_FLAGS_PATH_RX
+ #define PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME 0x2UL
+ __le16 port_id;
+ u8 unused_0[2];
+};
+
+/* hwrm_port_ts_query_output (size:192b/24B) */
+struct hwrm_port_ts_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le64 ptp_msg_ts;
+ __le16 ptp_msg_seqid;
+ u8 unused_0[5];
+ u8 valid;
+};
+
/* hwrm_port_phy_qcaps_input (size:192b/24B) */
struct hwrm_port_phy_qcaps_input {
__le16 req_type;
@@ -4703,7 +4764,8 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
- u8 unused_1[7];
+ __le16 max_aggs_supported;
+ u8 unused_1[5];
u8 valid;
};
@@ -4723,6 +4785,7 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
#define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_PACK_AS_GRO 0x100UL
__le32 enables;
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
@@ -5254,6 +5317,8 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 4)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 4)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_XDP_DISABLE 0x40UL
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_SOURCE_VALID 0x80UL
__le32 enables;
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
@@ -5272,8 +5337,11 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
#define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_NUM_VLANS 0x20000UL
+ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_NUM_VLANS 0x40000UL
u8 l2_addr[6];
- u8 unused_0[2];
+ u8 num_vlans;
+ u8 t_num_vlans;
u8 l2_addr_mask[6];
__le16 l2_ovlan;
__le16 l2_ovlan_mask;
@@ -5338,6 +5406,16 @@ struct hwrm_cfa_l2_filter_alloc_output {
__le16 resp_len;
__le64 l2_filter_id;
__le32 flow_id;
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_L2_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3];
u8 valid;
};
@@ -5504,6 +5582,16 @@ struct hwrm_cfa_tunnel_filter_alloc_output {
__le16 resp_len;
__le64 tunnel_filter_id;
__le32 flow_id;
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_TUNNEL_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3];
u8 valid;
};
@@ -5646,7 +5734,7 @@ struct hwrm_cfa_encap_record_free_output {
u8 valid;
};
-/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+/* hwrm_cfa_ntuple_filter_alloc_input (size:1088b/136B) */
struct hwrm_cfa_ntuple_filter_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5678,6 +5766,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_RFS_RING_TBL_IDX 0x80000UL
__le64 l2_filter_id;
u8 src_macaddr[6];
__be16 ethertype;
@@ -5725,6 +5814,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
__be16 dst_port;
__be16 dst_port_mask;
__le64 ntuple_filter_id_hint;
+ __le16 rfs_ring_tbl_idx;
+ u8 unused_0[6];
};
/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
@@ -5735,6 +5826,16 @@ struct hwrm_cfa_ntuple_filter_alloc_output {
__le16 resp_len;
__le64 ntuple_filter_id;
__le32 flow_id;
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_NTUPLE_FILTER_ALLOC_RESP_FLOW_ID_DIR_TX
u8 unused_0[3];
u8 valid;
};
@@ -5934,19 +6035,20 @@ struct hwrm_cfa_flow_alloc_input {
__le16 src_fid;
__le32 tunnel_handle;
__le16 action_flags;
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL
+ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NO_FLOW_COUNTER_ALLOC 0x2000UL
__le16 dst_fid;
__be16 l2_rewrite_vlan_tpid;
__be16 l2_rewrite_vlan_tci;
@@ -5997,6 +6099,16 @@ struct hwrm_cfa_flow_alloc_output {
__le16 flow_handle;
u8 unused_0[2];
__le32 flow_id;
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_MASK 0x3fffffffUL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_VALUE_SFT 0
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE 0x40000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_INT (0x0UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT (0x1UL << 30)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_TYPE_EXT
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR 0x80000000UL
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_RX (0x0UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX (0x1UL << 31)
+ #define CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_LAST CFA_FLOW_ALLOC_RESP_FLOW_ID_DIR_TX
__le64 ext_flow_handle;
__le32 flow_counter_id;
u8 unused_1[3];
@@ -6011,7 +6123,8 @@ struct hwrm_cfa_flow_free_input {
__le16 target_id;
__le64 resp_addr;
__le16 flow_handle;
- u8 unused_0[6];
+ __le16 unused_0;
+ __le32 flow_counter_id;
__le64 ext_flow_handle;
};
@@ -6199,8 +6312,10 @@ struct hwrm_cfa_eem_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
- #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x4UL
+ #define CFA_EEM_QCAPS_RESP_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED 0x8UL
__le32 unused_0;
__le32 supported;
#define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL
@@ -6226,7 +6341,9 @@ struct hwrm_cfa_eem_cfg_input {
#define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL
#define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL
- __le32 unused_0;
+ #define CFA_EEM_CFG_REQ_FLAGS_SECONDARY_PF 0x8UL
+ __le16 group_id;
+ __le16 unused_0;
__le32 num_entries;
__le32 unused_1;
__le16 key0_ctx_id;
@@ -6258,7 +6375,7 @@ struct hwrm_cfa_eem_qcfg_input {
__le32 unused_0;
};
-/* hwrm_cfa_eem_qcfg_output (size:128b/16B) */
+/* hwrm_cfa_eem_qcfg_output (size:192b/24B) */
struct hwrm_cfa_eem_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -6269,6 +6386,8 @@ struct hwrm_cfa_eem_qcfg_output {
#define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL
#define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL
__le32 num_entries;
+ u8 unused_0[7];
+ u8 valid;
};
/* hwrm_cfa_eem_op_input (size:192b/24B) */
@@ -6300,6 +6419,39 @@ struct hwrm_cfa_eem_op_output {
u8 valid;
};
+/* hwrm_cfa_adv_flow_mgnt_qcaps_input (size:256b/32B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 unused_0[4];
+};
+
+/* hwrm_cfa_adv_flow_mgnt_qcaps_output (size:128b/16B) */
+struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 flags;
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ u8 unused_0[3];
+ u8 valid;
+};
+
/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
@@ -6636,7 +6788,8 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
- #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
u8 unused_0[6];
u8 valid;
};
@@ -6659,8 +6812,8 @@ struct hwrm_fw_set_time_input {
u8 unused_0;
__le16 millisecond;
__le16 zone;
- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+ #define FW_SET_TIME_REQ_ZONE_UTC 0
+ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 65535
#define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
u8 unused_1[4];
};
@@ -7064,7 +7217,9 @@ struct hwrm_dbg_coredump_list_input {
__le64 host_dest_addr;
__le32 host_buf_len;
__le16 seq_no;
- u8 unused_0[2];
+ u8 flags;
+ #define DBG_COREDUMP_LIST_REQ_FLAGS_CRASHDUMP 0x1UL
+ u8 unused_0[1];
};
/* hwrm_dbg_coredump_list_output (size:128b/16B) */
@@ -7392,7 +7547,9 @@ struct hwrm_nvm_get_dev_info_output {
__le32 nvram_size;
__le32 reserved_size;
__le32 available_size;
- u8 unused_0[3];
+ u8 nvm_cfg_ver_maj;
+ u8 nvm_cfg_ver_min;
+ u8 nvm_cfg_ver_upd;
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 2bdd2da9aac7..f760921389a3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -406,26 +406,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->min_mtu = ETH_ZLEN;
}
-static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
-{
- struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
-
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN");
- return -EOPNOTSUPP;
- }
-
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
- return 0;
-}
-
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -490,11 +470,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
- /* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto err;
-
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 510dfc1c236b..57dc3cbff36e 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -4038,15 +4038,14 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
case L5CM_RAMROD_CMD_ID_CLOSE: {
struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
- if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
- netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
- l4kcqe->status, l5kcqe->completion_status);
- opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
- /* Fall through */
- } else {
+ if (l4kcqe->status == 0 && l5kcqe->completion_status == 0)
break;
- }
+
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+ l4kcqe->status, l5kcqe->completion_status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
}
+ /* Fall through */
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 983245c0867c..374b9ff05c88 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
/* Packets are ready, update producer index */
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
@@ -3476,7 +3476,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (dn) {
macaddr = of_get_mac_address(dn);
- if (!macaddr) {
+ if (IS_ERR(macaddr)) {
dev_err(&pdev->dev, "can't find MAC address\n");
err = -EINVAL;
goto err;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 2aebd4bbb67d..6d1f9c822548 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8151,7 +8151,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
- if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
/* Packets are ready, update Tx producer idx on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
}
@@ -12757,9 +12757,6 @@ static int tg3_set_phys_id(struct net_device *dev,
{
struct tg3 *tp = netdev_priv(dev);
- if (!netif_running(tp->dev))
- return -EAGAIN;
-
switch (state) {
case ETHTOOL_ID_ACTIVE:
return 1; /* cycle on/off once per second */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3da2795e2486..c049410bc888 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -285,34 +285,22 @@ static void macb_set_hwaddr(struct macb *bp)
static void macb_get_hwaddr(struct macb *bp)
{
- struct macb_platform_data *pdata;
u32 bottom;
u16 top;
u8 addr[6];
int i;
- pdata = dev_get_platdata(&bp->pdev->dev);
-
/* Check all 4 address register for valid address */
for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
- if (pdata && pdata->rev_eth_addr) {
- addr[5] = bottom & 0xff;
- addr[4] = (bottom >> 8) & 0xff;
- addr[3] = (bottom >> 16) & 0xff;
- addr[2] = (bottom >> 24) & 0xff;
- addr[1] = top & 0xff;
- addr[0] = (top & 0xff00) >> 8;
- } else {
- addr[0] = bottom & 0xff;
- addr[1] = (bottom >> 8) & 0xff;
- addr[2] = (bottom >> 16) & 0xff;
- addr[3] = (bottom >> 24) & 0xff;
- addr[4] = top & 0xff;
- addr[5] = (top >> 8) & 0xff;
- }
+ addr[0] = bottom & 0xff;
+ addr[1] = (bottom >> 8) & 0xff;
+ addr[2] = (bottom >> 16) & 0xff;
+ addr[3] = (bottom >> 24) & 0xff;
+ addr[4] = top & 0xff;
+ addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
@@ -510,12 +498,10 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct macb_platform_data *pdata;
struct phy_device *phydev;
struct device_node *np;
- int phy_irq, ret, i;
+ int ret, i;
- pdata = dev_get_platdata(&bp->pdev->dev);
np = bp->pdev->dev.of_node;
ret = 0;
@@ -530,8 +516,6 @@ static int macb_mii_probe(struct net_device *dev)
*/
if (!bp->phy_node && !phy_find_first(bp->mii_bus)) {
for (i = 0; i < PHY_MAX_ADDR; i++) {
- struct phy_device *phydev;
-
phydev = mdiobus_scan(bp->mii_bus, i);
if (IS_ERR(phydev) &&
PTR_ERR(phydev) != -ENODEV) {
@@ -559,19 +543,6 @@ static int macb_mii_probe(struct net_device *dev)
return -ENXIO;
}
- if (pdata) {
- if (gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev,
- pdata->phy_irq_pin, "phy int");
- if (!ret) {
- phy_irq = gpio_to_irq(pdata->phy_irq_pin);
- phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
- }
- } else {
- phydev->irq = PHY_POLL;
- }
- }
-
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change,
bp->phy_interface);
@@ -600,7 +571,6 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct macb_platform_data *pdata;
struct device_node *np;
int err = -ENXIO;
@@ -620,7 +590,6 @@ static int macb_mii_init(struct macb *bp)
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->pdev->dev;
- pdata = dev_get_platdata(&bp->pdev->dev);
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -634,9 +603,6 @@ static int macb_mii_init(struct macb *bp)
err = mdiobus_register(bp->mii_bus);
} else {
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
-
err = of_mdiobus_register(bp->mii_bus, np);
}
@@ -2461,12 +2427,12 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- bp->macbgem_ops.mog_init_rings(bp);
- macb_init_hw(bp);
-
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
napi_enable(&queue->napi);
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_hw(bp);
+
/* schedule a link state check */
phy_start(dev->phydev);
@@ -4052,7 +4018,6 @@ static int macb_probe(struct platform_device *pdev)
struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
struct clk *tsu_clk = NULL;
unsigned int queue_mask, num_queues;
- struct macb_platform_data *pdata;
bool native_io;
struct phy_device *phydev;
struct net_device *dev;
@@ -4172,27 +4137,21 @@ static int macb_probe(struct platform_device *pdev)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
mac = of_get_mac_address(np);
- if (mac) {
+ if (PTR_ERR(mac) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_out_free_netdev;
+ } else if (!IS_ERR(mac)) {
ether_addr_copy(bp->dev->dev_addr, mac);
} else {
- err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr);
- if (err) {
- if (err == -EPROBE_DEFER)
- goto err_out_free_netdev;
- macb_get_hwaddr(bp);
- }
+ macb_get_hwaddr(bp);
}
err = of_get_phy_mode(np);
- if (err < 0) {
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata && pdata->is_rmii)
- bp->phy_interface = PHY_INTERFACE_MODE_RMII;
- else
- bp->phy_interface = PHY_INTERFACE_MODE_MII;
- } else {
+ if (err < 0)
+ /* not found in DT, MII by default */
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
+ else
bp->phy_interface = err;
- }
/* IP specific init */
err = init(pdev);
@@ -4362,8 +4321,7 @@ static int __maybe_unused macb_resume(struct device *dev)
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (!(device_may_wakeup(&bp->dev->dev))) {
@@ -4379,8 +4337,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
static int __maybe_unused macb_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (!(device_may_wakeup(&bp->dev->dev))) {
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 6650e2a5f171..7612ab6b286d 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -68,6 +68,7 @@ config LIQUIDIO
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
+ select NET_DEVLINK
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapters
based on CN66XX, CN68XX and CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index fb6f813cff65..eab805579f96 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
- xmit_more = skb->xmit_more;
+ xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 54b245797d2e..db0b90555acb 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
}
- xmit_more = skb->xmit_more;
+ xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 5359c1021f42..15b1130aa4ae 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1503,7 +1503,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
memcpy(netdev->dev_addr, mac, ETH_ALEN);
else
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 673c57b8023f..a65be851124f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -962,13 +962,13 @@ static void bgx_poll_for_sgmii_link(struct lmac *lmac)
lmac->last_duplex = (an_result >> 1) & 0x1;
switch (speed) {
case 0:
- lmac->last_speed = 10;
+ lmac->last_speed = SPEED_10;
break;
case 1:
- lmac->last_speed = 100;
+ lmac->last_speed = SPEED_100;
break;
case 2:
- lmac->last_speed = 1000;
+ lmac->last_speed = SPEED_1000;
break;
default:
lmac->link_up = false;
@@ -1012,10 +1012,10 @@ static void bgx_poll_for_link(struct work_struct *work)
!(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->lmac_type == BGX_MODE_XLAUI)
- lmac->last_speed = 40000;
+ lmac->last_speed = SPEED_40000;
else
- lmac->last_speed = 10000;
- lmac->last_duplex = 1;
+ lmac->last_speed = SPEED_10000;
+ lmac->last_duplex = DUPLEX_FULL;
} else {
lmac->link_up = 0;
lmac->last_speed = SPEED_UNKNOWN;
@@ -1105,8 +1105,8 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
} else {
/* Default to below link speed and duplex */
lmac->link_up = true;
- lmac->last_speed = 1000;
- lmac->last_duplex = 1;
+ lmac->last_speed = SPEED_1000;
+ lmac->last_duplex = DUPLEX_FULL;
bgx_sgmii_change_link_state(lmac);
return 0;
}
@@ -1484,7 +1484,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
break;
mac = of_get_mac_address(node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(bgx->lmac[lmac].mac, mac);
SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 0e9182d3f02c..b3e4118a15e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -443,9 +443,9 @@ found:
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
- int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+ int i;
- d = kvzalloc(size, GFP_KERNEL);
+ d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c2fd323c4078..ea75f275023f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -75,8 +75,8 @@ struct l2t_data {
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
- struct l2t_entry l2tab[0];
struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[];
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 956219c178e1..a8fe0808823d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1575,9 +1575,11 @@ int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc);
int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout);
+ u8 sleep_ok, int timeout);
static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
unsigned int port, struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index bec4711005cc..9e589302af90 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -442,7 +442,7 @@ static unsigned int speed_to_fw_caps(int speed)
* Link Mode Mask.
*/
static void fw_caps_to_lmm(enum fw_port_type port_type,
- unsigned int fw_caps,
+ fw_port_cap32_t fw_caps,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
@@ -632,7 +632,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
link_ksettings->link_modes.supported);
- fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
+ fw_caps_to_lmm(pi->port_type,
+ t4_link_acaps(pi->adapter,
+ pi->lport,
+ &pi->link_cfg),
link_ksettings->link_modes.advertising);
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
@@ -642,22 +645,6 @@ static int get_link_ksettings(struct net_device *dev,
: SPEED_UNKNOWN);
base->duplex = DUPLEX_FULL;
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5afb43000049..4107007b6ec4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -524,8 +524,7 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
return -ENOMEM;
fwr = __skb_put(skb, len);
- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
- : adapter->sge.fw_evtq.abs_id);
+ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -744,16 +743,40 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
void clear_all_filters(struct adapter *adapter)
{
+ struct net_device *dev = adapter->port[0];
unsigned int i;
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
unsigned int max_ftid = adapter->tids.nftids +
adapter->tids.nsftids;
-
+ /* Clear all TCAM filters */
for (i = 0; i < max_ftid; i++, f++)
if (f->valid || f->pending)
- clear_filter(adapter, f);
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
+ /* Clear all hash filters */
+ if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
+ struct filter_entry *f;
+ unsigned int sb;
+
+ for (i = adapter->tids.hash_base;
+ i <= adapter->tids.ntids; i++) {
+ f = (struct filter_entry *)
+ adapter->tids.tid_tab[i];
+
+ if (f && (f->valid || f->pending))
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
+
+ sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ for (i = 0; i < sb; i++) {
+ f = (struct filter_entry *)adapter->tids.tid_tab[i];
+
+ if (f && (f->valid || f->pending))
+ cxgb4_del_filter(dev, i, &f->fs);
+ }
}
}
@@ -1568,9 +1591,8 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx ctx;
int ret;
- /* If we are shutting down the adapter do not wait for completion */
if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
- return __cxgb4_del_filter(dev, filter_id, fs, NULL);
+ return 0;
init_completion(&ctx.completion);
@@ -1722,12 +1744,13 @@ void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
break;
default:
- dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
- __func__, status);
+ if (status != CPL_ERR_TCAM_FULL)
+ dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
+ __func__, status);
if (ctx) {
if (status == CPL_ERR_TCAM_FULL)
- ctx->result = -EAGAIN;
+ ctx->result = -ENOSPC;
else
ctx->result = -EINVAL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 89179e316687..715e4edcf4a2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -979,8 +979,7 @@ freeout:
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
int txq;
@@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
@@ -6025,6 +6024,11 @@ static void remove_one(struct pci_dev *pdev)
return;
}
+ /* If we allocated filters, free up state associated with any
+ * valid filters ...
+ */
+ clear_all_filters(adapter);
+
adapter->flags |= CXGB4_SHUTTING_DOWN;
if (adapter->pf == 4) {
@@ -6055,11 +6059,6 @@ static void remove_one(struct pci_dev *pdev)
if (IS_REACHABLE(CONFIG_THERMAL))
cxgb4_thermal_remove(adapter);
- /* If we allocated filters, free up state associated with any
- * valid filters ...
- */
- clear_all_filters(adapter);
-
if (adapter->flags & CXGB4_FULL_INIT_DONE)
cxgb_down(adapter);
@@ -6161,15 +6160,24 @@ static int __init cxgb4_init_module(void)
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
- debugfs_remove(cxgb4_debugfs_root);
+ goto err_pci;
#if IS_ENABLED(CONFIG_IPV6)
if (!inet6addr_registered) {
- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
- inet6addr_registered = true;
+ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+ if (ret)
+ pci_unregister_driver(&cxgb4_driver);
+ else
+ inet6addr_registered = true;
}
#endif
+ if (ret == 0)
+ return ret;
+
+err_pci:
+ debugfs_remove(cxgb4_debugfs_root);
+
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 82a8d1970060..6e2d80008a79 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -687,11 +687,8 @@ int cxgb4_tc_flower_replace(struct net_device *dev,
ret = ctx.result;
/* Check if hw returned error for filter creation */
- if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ if (ret)
goto free_entry;
- }
ch_flower->tc_flower_cookie = cls->cookie;
ch_flower->filter_id = ctx.tid;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a3544041ad32..f9b70be59792 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3964,6 +3964,14 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
}
+/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
+ * Capabilities which we control with separate controls -- see, for instance,
+ * Pause Frames and Forward Error Correction. In order to determine what the
+ * full set of Advertised Port Capabilities are, the base Advertised Port
+ * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
+ * Port Capabilities associated with those other controls. See
+ * t4_link_acaps() for how this is done.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
FW_PORT_CAP32_ANEG)
@@ -4061,6 +4069,9 @@ static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
/* Translate Common Code Pause specification into Firmware Port Capabilities */
static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
+ /* Translate orthogonal RX/TX Pause Controls for L1 Configure
+ * commands, etc.
+ */
fw_port_cap32_t fw_pause = 0;
if (cc_pause & PAUSE_RX)
@@ -4070,6 +4081,19 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (!(cc_pause & PAUSE_AUTONEG))
fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
+ /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
+ * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
+ * Note that these bits are ignored in L1 Configure commands.
+ */
+ if (cc_pause & PAUSE_RX) {
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
+ else
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ } else if (cc_pause & PAUSE_TX) {
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ }
+
return fw_pause;
}
@@ -4100,31 +4124,22 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
}
/**
- * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * t4_link_acaps - compute Link Advertised Port Capabilities
* @adapter: the adapter
- * @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
- * @sleep_ok: if true we may sleep while awaiting command completion
- * @timeout: time to wait for command to finish before timing out
- * (negative implies @sleep_ok=false)
*
- * Set up a port's MAC and PHY according to a desired link configuration.
- * - If the PHY can auto-negotiate first decide what to advertise, then
- * enable/disable auto-negotiation as desired, and reset.
- * - If the PHY does not auto-negotiate just reset it.
- * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
- * otherwise do it later based on the outcome of auto-negotiation.
+ * Synthesize the Advertised Port Capabilities we'll be using based on
+ * the base Advertised Port Capabilities (which have been filtered by
+ * ADVERT_MASK) plus the individual controls for things like Pause
+ * Frames, Forward Error Correction, MDI, etc.
*/
-int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
- unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout)
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc)
{
- unsigned int fw_caps = adapter->params.fw_caps_support;
- fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
- struct fw_port_cmd cmd;
+ fw_port_cap32_t fw_fc, fw_fec, acaps;
unsigned int fw_mdi;
- int ret;
+ char cc_fec;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
@@ -4151,18 +4166,15 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
- if (lc->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- rcap = lc->acaps | fw_fc | fw_fec;
+ acaps = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else {
- rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
/* Some Requested Port Capabilities are trivially wrong if they exceed
@@ -4173,15 +4185,50 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
- if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
- dev_err(adapter->pdev_dev,
- "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
- rcap, lc->pcaps);
+ if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
+ dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
+ acaps, lc->pcaps);
+ return -EINVAL;
+ }
+
+ return acaps;
+}
+
+/**
+ * t4_link_l1cfg_core - apply link configuration to MAC/PHY
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+ u8 sleep_ok, int timeout)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd cmd;
+ fw_port_cap32_t rcap;
+ int ret;
+
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
+ lc->autoneg == AUTONEG_ENABLE) {
return -EINVAL;
}
- /* And send that on to the Firmware ...
+ /* Compute our Requested Port Capabilities and send that on to the
+ * Firmware.
*/
+ rcap = t4_link_acaps(adapter, port, lc);
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
@@ -4211,7 +4258,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap, -ret);
return ret;
}
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 9125ddd89dd1..a02b1dff403e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x16
-#define T4FW_VERSION_MICRO 0x09
+#define T4FW_VERSION_MINOR 0x17
+#define T4FW_VERSION_MICRO 0x03
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x16
-#define T5FW_VERSION_MICRO 0x09
+#define T5FW_VERSION_MINOR 0x17
+#define T5FW_VERSION_MICRO 0x03
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x16
-#define T6FW_VERSION_MICRO 0x09
+#define T6FW_VERSION_MINOR 0x17
+#define T6FW_VERSION_MICRO 0x03
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index adc4d481815b..6d4cf3d0b2f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -518,8 +518,8 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
break;
}
cpl = (void *)p;
- /*FALLTHROUGH*/
}
+ /* Fall through */
case CPL_SGE_EGR_UPDATE: {
/*
@@ -1479,22 +1479,6 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev,
base->duplex = DUPLEX_UNKNOWN;
}
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 84dff74ca9cd..8a389d617a23 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -313,7 +313,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
return ret;
}
+/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
+ * mask out bits in the Advertised Port Capabilities which are managed via
+ * separate controls, like Pause Frames and Forward Error Correction. In the
+ * Virtual Function Common Code, since we never perform L1 Configuration on
+ * the Link, the only things we really need to filter out are things which
+ * we decode and report separately like Speed.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
+ FW_PORT_CAP32_802_3_PAUSE | \
+ FW_PORT_CAP32_802_3_ASM_DIR | \
+ FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
FW_PORT_CAP32_ANEG)
/**
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 733d9172425b..acb2856936d2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
skb_tx_timestamp(skb);
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
spin_unlock(&enic->wq_lock[txq_map]);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 949103db8a8a..9003eb6716cd 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1235,8 +1235,6 @@ static int gmac_start_xmit(struct sk_buff *skb, struct net_device *netdev)
int txq_num, nfrags;
union dma_rwptr rw;
- SKB_FRAG_ASSERT(skb);
-
if (skb->len >= 0x10000)
goto out_drop_free;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index c2586f44c29d..953ee5616801 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1412,7 +1412,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
pdata->flags |= DM9000_PLATF_NO_EEPROM;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(pdata->dev_addr, mac_addr, sizeof(pdata->dev_addr));
return pdata;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3c7c04406a2b..e2f9fbced174 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
u16 q_idx = skb_get_queue_mapping(skb);
struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
struct be_wrb_params wrb_params = { 0 };
- bool flush = !skb->xmit_more;
+ bool flush = !netdev_xmit_more();
u16 wrb_cnt;
skb = be_xmit_workarounds(adapter, skb, &wrb_params);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0f3e7f21c6fa..71da0490521b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1153,7 +1153,7 @@ static int ethoc_probe(struct platform_device *pdev)
const void *mac;
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(netdev->dev_addr, mac);
priv->phy_id = -1;
}
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 659f1ad37e96..b4ce26155087 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -616,7 +616,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
/* set kernel MAC address to dev */
mac_addr = of_get_mac_address(dev->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dfebc30c4841..d3f2408dc9e8 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1648,7 +1648,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
qm_sg_entry_get_len(&sgt[0]), dma_dir);
/* remaining pages were mapped with skb_frag_dma_map() */
- for (i = 1; i < nr_frags; i++) {
+ for (i = 1; i <= nr_frags; i++) {
WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index dc339dc1adb2..63b1ecc18c26 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -435,7 +435,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(&ch->napi, skb);
+ list_add_tail(&skb->list, ch->rx_list);
return;
@@ -1113,12 +1113,16 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ struct list_head rx_list;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
ch->xdp.res = 0;
priv = ch->priv;
+ INIT_LIST_HEAD(&rx_list);
+ ch->rx_list = &rx_list;
+
do {
err = pull_channel(ch);
if (unlikely(err))
@@ -1162,6 +1166,8 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
work_done = max(rx_cleaned, 1);
out:
+ netif_receive_skb_list(ch->rx_list);
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
@@ -2565,10 +2571,12 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L2DA,
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_DA,
+ .id = DPAA2_ETH_DIST_ETHDST,
.size = 6,
}, {
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_SA,
+ .id = DPAA2_ETH_DIST_ETHSRC,
.size = 6,
}, {
/* This is the last ethertype field parsed:
@@ -2577,28 +2585,33 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
*/
.cls_prot = NET_PROT_ETH,
.cls_field = NH_FLD_ETH_TYPE,
+ .id = DPAA2_ETH_DIST_ETHTYPE,
.size = 2,
}, {
/* VLAN header */
.rxnfc_field = RXH_VLAN,
.cls_prot = NET_PROT_VLAN,
.cls_field = NH_FLD_VLAN_TCI,
+ .id = DPAA2_ETH_DIST_VLAN,
.size = 2,
}, {
/* IP header */
.rxnfc_field = RXH_IP_SRC,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_SRC,
+ .id = DPAA2_ETH_DIST_IPSRC,
.size = 4,
}, {
.rxnfc_field = RXH_IP_DST,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_DST,
+ .id = DPAA2_ETH_DIST_IPDST,
.size = 4,
}, {
.rxnfc_field = RXH_L3_PROTO,
.cls_prot = NET_PROT_IP,
.cls_field = NH_FLD_IP_PROTO,
+ .id = DPAA2_ETH_DIST_IPPROTO,
.size = 1,
}, {
/* Using UDP ports, this is functionally equivalent to raw
@@ -2607,11 +2620,13 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
.rxnfc_field = RXH_L4_B_0_1,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_SRC,
+ .id = DPAA2_ETH_DIST_L4SRC,
.size = 2,
}, {
.rxnfc_field = RXH_L4_B_2_3,
.cls_prot = NET_PROT_UDP,
.cls_field = NH_FLD_UDP_PORT_DST,
+ .id = DPAA2_ETH_DIST_L4DST,
.size = 2,
},
};
@@ -2677,12 +2692,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Size of the Rx flow classification key */
-int dpaa2_eth_cls_key_size(void)
+int dpaa2_eth_cls_key_size(u64 fields)
{
int i, size = 0;
- for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ if (!(fields & dist_fields[i].id))
+ continue;
size += dist_fields[i].size;
+ }
return size;
}
@@ -2703,6 +2721,24 @@ int dpaa2_eth_cls_fld_off(int prot, int field)
return 0;
}
+/* Prune unused fields from the classification rule.
+ * Used when masking is not supported
+ */
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
+{
+ int off = 0, new_off = 0;
+ int i, size;
+
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
+ size = dist_fields[i].size;
+ if (dist_fields[i].id & fields) {
+ memcpy(key_mem + new_off, key_mem + off, size);
+ new_off += size;
+ }
+ off += size;
+ }
+}
+
/* Set Rx distribution (hash or flow classification) key
* flags is a combination of RXH_ bits
*/
@@ -2724,14 +2760,13 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
struct dpkg_extract *key =
&cls_cfg.extracts[cls_cfg.num_extracts];
- /* For Rx hashing key we set only the selected fields.
- * For Rx flow classification key we set all supported fields
+ /* For both Rx hashing and classification keys
+ * we set only the selected fields.
*/
- if (type == DPAA2_ETH_RX_DIST_HASH) {
- if (!(flags & dist_fields[i].rxnfc_field))
- continue;
+ if (!(flags & dist_fields[i].id))
+ continue;
+ if (type == DPAA2_ETH_RX_DIST_HASH)
rx_hash_fields |= dist_fields[i].rxnfc_field;
- }
if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
dev_err(dev, "error adding key extraction rule, too many rules?\n");
@@ -2786,16 +2821,28 @@ free_key:
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u64 key = 0;
+ int i;
if (!dpaa2_eth_hash_enabled(priv))
return -EOPNOTSUPP;
- return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
+ for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
+ if (dist_fields[i].rxnfc_field & flags)
+ key |= dist_fields[i].id;
+
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
+}
+
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
+{
+ return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}
-static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
+ int err;
/* Check if we actually support Rx flow classification */
if (dpaa2_eth_has_legacy_dist(priv)) {
@@ -2803,8 +2850,7 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
- if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
- !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
+ if (!dpaa2_eth_fs_enabled(priv)) {
dev_dbg(dev, "Rx cls disabled in DPNI options\n");
return -EOPNOTSUPP;
}
@@ -2814,9 +2860,21 @@ static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
return -EOPNOTSUPP;
}
+ /* If there is no support for masking in the classification table,
+ * we don't set a default key, as it will depend on the rules
+ * added by the user at runtime.
+ */
+ if (!dpaa2_eth_fs_mask_enabled(priv))
+ goto out;
+
+ err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
+ if (err)
+ return err;
+
+out:
priv->rx_cls_enabled = 1;
- return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
+ return 0;
}
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
@@ -2851,7 +2909,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
/* Configure the flow classification key; it includes all
* supported header fields and cannot be modified at runtime
*/
- err = dpaa2_eth_set_cls(priv);
+ err = dpaa2_eth_set_default_cls(priv);
if (err && err != -EOPNOTSUPP)
dev_err(dev, "Failed to configure Rx classification key\n");
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7879622aa3e6..5fb8f5c0dc9f 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -334,6 +334,7 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_stats stats;
struct dpaa2_eth_ch_xdp xdp;
struct xdp_rxq_info xdp_rxq;
+ struct list_head *rx_list;
};
struct dpaa2_eth_dist_fields {
@@ -341,6 +342,7 @@ struct dpaa2_eth_dist_fields {
enum net_prot cls_prot;
int cls_field;
int size;
+ u64 id;
};
struct dpaa2_eth_cls_rule {
@@ -393,6 +395,7 @@ struct dpaa2_eth_priv {
/* enabled ethtool hashing bits */
u64 rx_hash_fields;
+ u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
struct bpf_prog *xdp_prog;
@@ -436,6 +439,12 @@ static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_RX_DIST_KEY_VER_MAJOR, \
DPNI_RX_DIST_KEY_VER_MINOR) < 0)
+#define dpaa2_eth_fs_enabled(priv) \
+ (!((priv)->dpni_attrs.options & DPNI_OPT_NO_FS))
+
+#define dpaa2_eth_fs_mask_enabled(priv) \
+ ((priv)->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)
+
#define dpaa2_eth_fs_count(priv) \
((priv)->dpni_attrs.fs_entries)
@@ -448,6 +457,18 @@ enum dpaa2_eth_rx_dist {
DPAA2_ETH_RX_DIST_CLS
};
+/* Unique IDs for the supported Rx classification header fields */
+#define DPAA2_ETH_DIST_ETHDST BIT(0)
+#define DPAA2_ETH_DIST_ETHSRC BIT(1)
+#define DPAA2_ETH_DIST_ETHTYPE BIT(2)
+#define DPAA2_ETH_DIST_VLAN BIT(3)
+#define DPAA2_ETH_DIST_IPSRC BIT(4)
+#define DPAA2_ETH_DIST_IPDST BIT(5)
+#define DPAA2_ETH_DIST_IPPROTO BIT(6)
+#define DPAA2_ETH_DIST_L4SRC BIT(7)
+#define DPAA2_ETH_DIST_L4DST BIT(8)
+#define DPAA2_ETH_DIST_ALL (~0U)
+
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
@@ -482,7 +503,9 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
-int dpaa2_eth_cls_key_size(void);
+int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
+int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
+void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 591dfcf76adb..76bd8d2872cc 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -264,7 +264,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -272,18 +272,21 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = eth_value->h_proto;
*(__be16 *)(mask + off) = eth_mask->h_proto;
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
}
if (!is_zero_ether_addr(eth_mask->h_source)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
ether_addr_copy(key + off, eth_value->h_source);
ether_addr_copy(mask + off, eth_mask->h_source);
+ *fields |= DPAA2_ETH_DIST_ETHSRC;
}
if (!is_zero_ether_addr(eth_mask->h_dest)) {
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, eth_value->h_dest);
ether_addr_copy(mask + off, eth_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
}
return 0;
@@ -291,7 +294,7 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
struct ethtool_usrip4_spec *uip_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
u32 tmp_value, tmp_mask;
@@ -303,18 +306,21 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = uip_value->ip4src;
*(__be32 *)(mask + off) = uip_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
}
if (uip_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = uip_value->ip4dst;
*(__be32 *)(mask + off) = uip_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
}
if (uip_mask->proto) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = uip_value->proto;
*(u8 *)(mask + off) = uip_mask->proto;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
}
if (uip_mask->l4_4_bytes) {
@@ -324,23 +330,26 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = htons(tmp_value >> 16);
*(__be16 *)(mask + off) = htons(tmp_mask >> 16);
+ *fields |= DPAA2_ETH_DIST_L4SRC;
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
*(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
+ *fields |= DPAA2_ETH_DIST_L4DST;
}
/* Only apply the rule for IPv4 frames */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
return 0;
}
static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
struct ethtool_tcpip4_spec *l4_mask,
- void *key, void *mask, u8 l4_proto)
+ void *key, void *mask, u8 l4_proto, u64 *fields)
{
int off;
@@ -351,41 +360,47 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
*(__be32 *)(key + off) = l4_value->ip4src;
*(__be32 *)(mask + off) = l4_mask->ip4src;
+ *fields |= DPAA2_ETH_DIST_IPSRC;
}
if (l4_mask->ip4dst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
*(__be32 *)(key + off) = l4_value->ip4dst;
*(__be32 *)(mask + off) = l4_mask->ip4dst;
+ *fields |= DPAA2_ETH_DIST_IPDST;
}
if (l4_mask->psrc) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
*(__be16 *)(key + off) = l4_value->psrc;
*(__be16 *)(mask + off) = l4_mask->psrc;
+ *fields |= DPAA2_ETH_DIST_L4SRC;
}
if (l4_mask->pdst) {
off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
*(__be16 *)(key + off) = l4_value->pdst;
*(__be16 *)(mask + off) = l4_mask->pdst;
+ *fields |= DPAA2_ETH_DIST_L4DST;
}
/* Only apply the rule for IPv4 frames with the specified L4 proto */
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
*(__be16 *)(key + off) = htons(ETH_P_IP);
*(__be16 *)(mask + off) = htons(0xFFFF);
+ *fields |= DPAA2_ETH_DIST_ETHTYPE;
off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
*(u8 *)(key + off) = l4_proto;
*(u8 *)(mask + off) = 0xFF;
+ *fields |= DPAA2_ETH_DIST_IPPROTO;
return 0;
}
static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -396,6 +411,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
*(__be16 *)(key + off) = ext_value->vlan_tci;
*(__be16 *)(mask + off) = ext_mask->vlan_tci;
+ *fields |= DPAA2_ETH_DIST_VLAN;
}
return 0;
@@ -403,7 +419,7 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
struct ethtool_flow_ext *ext_mask,
- void *key, void *mask)
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -411,36 +427,38 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
ether_addr_copy(key + off, ext_value->h_dest);
ether_addr_copy(mask + off, ext_mask->h_dest);
+ *fields |= DPAA2_ETH_DIST_ETHDST;
}
return 0;
}
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
+static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
+ u64 *fields)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
- key, mask);
+ key, mask, fields);
break;
case IP_USER_FLOW:
err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
- &fs->m_u.usr_ip4_spec, key, mask);
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
break;
case TCP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
- key, mask, IPPROTO_TCP);
+ key, mask, IPPROTO_TCP, fields);
break;
case UDP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
- key, mask, IPPROTO_UDP);
+ key, mask, IPPROTO_UDP, fields);
break;
case SCTP_V4_FLOW:
err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
&fs->m_u.sctp_ip4_spec, key, mask,
- IPPROTO_SCTP);
+ IPPROTO_SCTP, fields);
break;
default:
return -EOPNOTSUPP;
@@ -450,13 +468,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask)
return err;
if (fs->flow_type & FLOW_EXT) {
- err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
- err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask);
+ err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
+ fields);
if (err)
return err;
}
@@ -473,6 +492,7 @@ static int do_cls_rule(struct net_device *net_dev,
struct dpni_rule_cfg rule_cfg = { 0 };
struct dpni_fs_action_cfg fs_act = { 0 };
dma_addr_t key_iova;
+ u64 fields = 0;
void *key_buf;
int err;
@@ -480,7 +500,7 @@ static int do_cls_rule(struct net_device *net_dev,
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
return -EINVAL;
- rule_cfg.key_size = dpaa2_eth_cls_key_size();
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);
/* allocate twice the key size, for the actual key and for mask */
key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
@@ -488,10 +508,36 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM;
/* Fill the key and mask memory areas */
- err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size);
+ err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err)
goto free_mem;
+ if (!dpaa2_eth_fs_mask_enabled(priv)) {
+ /* Masking allows us to configure a maximal key during init and
+ * use it for all flow steering rules. Without it, we include
+ * in the key only the fields actually used, so we need to
+ * extract the others from the final key buffer.
+ *
+ * Program the FS key if needed, or return error if previously
+ * set key can't be used for the current rule. User needs to
+ * delete existing rules in this case to allow for the new one.
+ */
+ if (!priv->rx_cls_fields) {
+ err = dpaa2_eth_set_cls(net_dev, fields);
+ if (err)
+ goto free_mem;
+
+ priv->rx_cls_fields = fields;
+ } else if (priv->rx_cls_fields != fields) {
+ netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
+ err = -EOPNOTSUPP;
+ goto free_mem;
+ }
+
+ dpaa2_eth_cls_trim_rule(key_buf, fields);
+ rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
+ }
+
key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_iova)) {
@@ -500,7 +546,8 @@ static int do_cls_rule(struct net_device *net_dev,
}
rule_cfg.key_iova = key_iova;
- rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
+ if (dpaa2_eth_fs_mask_enabled(priv))
+ rule_cfg.mask_iova = key_iova + rule_cfg.key_size;
if (add) {
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
@@ -522,6 +569,17 @@ free_mem:
return err;
}
+static int num_rules(struct dpaa2_eth_priv *priv)
+{
+ int i, rules = 0;
+
+ for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
+ if (priv->cls_rules[i].in_use)
+ rules++;
+
+ return rules;
+}
+
static int update_cls_rule(struct net_device *net_dev,
struct ethtool_rx_flow_spec *new_fs,
int location)
@@ -545,6 +603,9 @@ static int update_cls_rule(struct net_device *net_dev,
return err;
rule->in_use = 0;
+
+ if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+ priv->rx_cls_fields = 0;
}
/* If no new entry to add, return here */
@@ -581,9 +642,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
- for (i = 0; i < max_rules; i++)
- if (priv->cls_rules[i].in_use)
- rxnfc->rule_cnt++;
+ rxnfc->rule_cnt = num_rules(priv);
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a96ad20ee484..aa7d4e27c5d1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1655,7 +1655,7 @@ static void fec_get_mac(struct net_device *ndev)
struct device_node *np = fep->pdev->dev.of_node;
if (np) {
const char *mac = of_get_mac_address(np);
- if (mac)
+ if (!IS_ERR(mac))
iap = (unsigned char *) mac;
}
}
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index c1968b3ecec8..7b7e526869a7 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -902,7 +902,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
* First try to read MAC address from DT
*/
mac_addr = of_get_mac_address(np);
- if (mac_addr) {
+ if (!IS_ERR(mac_addr)) {
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
} else {
struct mpc52xx_fec __iomem *fec = priv->fec;
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 3c21486c6c84..9cd2c28d17df 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -724,7 +724,7 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the MAC address */
mac_addr = of_get_mac_address(mac_node);
- if (!mac_addr) {
+ if (IS_ERR(mac_addr)) {
dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
err = -EINVAL;
goto _return_of_get_parent;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 7c548ed535da..90ea7a115d0f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1014,7 +1014,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->tx_lock);
mac_addr = of_get_mac_address(ofdev->dev.of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
ret = fep->ops->allocate_bd(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 45fcc96be90e..df13c693b038 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -872,7 +872,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index eb3e65e8868f..216e99af2b5a 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3910,7 +3910,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
}
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
ugeth->ug_info = ug_info;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 0beee2cc2ddd..722b6de24816 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -252,14 +252,12 @@ uec_set_ringparam(struct net_device *netdev,
return -EINVAL;
}
+ if (netif_running(netdev))
+ return -EBUSY;
+
ug_info->bdRingLenRx[queue] = ring->rx_pending;
ug_info->bdRingLenTx[queue] = ring->tx_pending;
- if (netif_running(netdev)) {
- /* FIXME: restart automatically */
- netdev_info(netdev, "Please re-open the interface\n");
- }
-
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 2c2808830e95..96c32ae320b0 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -870,7 +870,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
phy_modes(phy->interface));
mac_addr = of_get_mac_address(node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e5d853b7b454..b1cb58f0aaf6 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1229,7 +1229,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
}
mac_addr = of_get_mac_address(node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 61eea6ac846f..e05d2095d09b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -2769,7 +2769,7 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
struct hns_mac_cb *mac_cb;
u8 addr[ETH_ALEN] = {0};
u8 port_num;
- u16 mskid;
+ int mskid;
/* promisc use vague table match with vlanid = 0 & macaddr = 0 */
hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 4cd86ba1f050..65b985acae38 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -598,7 +598,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
} else {
ring->stats.seg_pkt_cnt++;
- pull_len = eth_get_headlen(va, HNS_RX_HEAD_SIZE);
+ pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
@@ -1962,8 +1962,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1973,7 +1972,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb, NULL);
+ return netdev_pick_tx(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 299b277bc7ae..83e19c6b974e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -43,6 +43,8 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
+ HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
+ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
};
@@ -62,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+ HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
};
#define HCLGE_MBX_MAX_MSG_SIZE 16
@@ -80,12 +84,15 @@ struct hclgevf_mbx_resp_status {
struct hclge_mbx_vf_to_pf_cmd {
u8 rsv;
u8 mbx_src_vfid; /* Auto filled by IMP */
- u8 rsv1[2];
+ u8 mbx_need_resp;
+ u8 rsv1[1];
u8 msg_len;
u8 rsv2[3];
u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
};
+#define HCLGE_MBX_NEED_RESP_BIT BIT(0)
+
struct hclge_mbx_pf_to_vf_cmd {
u8 dest_vfid;
u8 rsv[3];
@@ -107,7 +114,7 @@ struct hclgevf_mbx_arq_ring {
struct hclgevf_dev *hdev;
u32 head;
u32 tail;
- u32 count;
+ atomic_t count;
u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 17ab4f4af6ad..fa8b8506b120 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
return inited;
}
-static int hnae3_match_n_instantiate(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, bool is_reg)
+static int hnae3_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
int ret;
@@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
return 0;
}
- /* now, (un-)instantiate client by calling lower layer */
- if (is_reg) {
- ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret)
- dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client, ret = %d\n", ret);
+ ret = ae_dev->ops->init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail to instantiate client, ret = %d\n", ret);
- return ret;
- }
+ return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+ return;
if (hnae3_get_client_init_flag(client, ae_dev)) {
ae_dev->ops->uninit_client_instance(client, ae_dev);
hnae3_set_client_init_flag(client, ae_dev, 0);
}
-
- return 0;
}
int hnae3_register_client(struct hnae3_client *client)
@@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on current port, for
* any error reasons, move on to next available port
*/
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
mutex_lock(&hnae3_common_lock);
/* un-initialize the client on every matched port */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
}
list_del(&client->node);
@@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
* initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed, ret = %d\n",
@@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
* un-initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node)
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
* initialize the figure out client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed, ret = %d\n",
@@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
continue;
list_for_each_entry(client, &hnae3_client_list, node)
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 38b430f11fc1..ad21b0ef1946 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -120,6 +120,25 @@ enum hnae3_media_type {
HNAE3_MEDIA_TYPE_NONE,
};
+/* must be consistent with definition in firmware */
+enum hnae3_module_type {
+ HNAE3_MODULE_TYPE_UNKNOWN = 0x00,
+ HNAE3_MODULE_TYPE_FIBRE_LR = 0x01,
+ HNAE3_MODULE_TYPE_FIBRE_SR = 0x02,
+ HNAE3_MODULE_TYPE_AOC = 0x03,
+ HNAE3_MODULE_TYPE_CR = 0x04,
+ HNAE3_MODULE_TYPE_KR = 0x05,
+ HNAE3_MODULE_TYPE_TP = 0x06,
+
+};
+
+enum hnae3_fec_mode {
+ HNAE3_FEC_AUTO = 0,
+ HNAE3_FEC_BASER,
+ HNAE3_FEC_RS,
+ HNAE3_FEC_USER_DEF,
+};
+
enum hnae3_reset_notify_type {
HNAE3_UP_CLIENT,
HNAE3_DOWN_CLIENT,
@@ -147,6 +166,13 @@ enum hnae3_flr_state {
HNAE3_FLR_DONE,
};
+enum hnae3_port_base_vlan_state {
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ HNAE3_PORT_BASE_VLAN_ENABLE,
+ HNAE3_PORT_BASE_VLAN_MODIFY,
+ HNAE3_PORT_BASE_VLAN_NOCHANGE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -223,10 +249,10 @@ struct hnae3_ae_dev {
* non-ok
* get_ksettings_an_result()
* Get negotiation status,speed and duplex
- * update_speed_duplex_h()
- * Update hardware speed and duplex
* get_media_type()
* Get media type of MAC
+ * check_port_speed()
 * Check whether the target speed is supported
* adjust_link()
* Adjust link status
* set_loopback()
@@ -243,6 +269,8 @@ struct hnae3_ae_dev {
* set auto autonegotiation of pause frame use
* get_autoneg()
* get auto autonegotiation of pause frame use
+ * restart_autoneg()
+ * restart autonegotiation
* get_coalesce_usecs()
* get usecs to delay a TX interrupt after a packet is sent
* get_rx_max_coalesced_frames()
@@ -333,11 +361,15 @@ struct hnae3_ae_ops {
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
- int (*update_speed_duplex_h)(struct hnae3_handle *handle);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
u8 duplex);
- void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type);
+ void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type);
+ int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode);
+ int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex);
int (*set_loopback)(struct hnae3_handle *handle,
enum hnae3_loop loop_mode, bool en);
@@ -353,6 +385,7 @@ struct hnae3_ae_ops {
int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
int (*get_autoneg)(struct hnae3_handle *handle);
+ int (*restart_autoneg)(struct hnae3_handle *handle);
void (*get_coalesce_usecs)(struct hnae3_handle *handle,
u32 *tx_usecs, u32 *rx_usecs);
@@ -385,7 +418,8 @@ struct hnae3_ae_ops {
void (*update_stats)(struct hnae3_handle *handle,
struct net_device_stats *net_stats);
void (*get_stats)(struct hnae3_handle *handle, u64 *data);
-
+ void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
+ u64 *rx_cnt);
void (*get_strings)(struct hnae3_handle *handle,
u32 stringset, u8 *data);
int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
@@ -578,8 +612,13 @@ struct hnae3_handle {
u32 numa_node_mask; /* for multi-chip support */
+ enum hnae3_port_base_vlan_state port_base_vlan_state;
+
u8 netdev_flags;
struct dentry *hnae3_dbgfs;
+
+ /* Network interface message level enabled bits */
+ u32 msg_enable;
};
#define hnae3_set_field(origin, mask, shift, val) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 0de543faa5b1..fc4917ac44be 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "queue info [number]\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
+
+ if (!hns3_is_phys_func(h->pdev))
+ return;
+
dev_info(&h->pdev->dev, "dump fd tcam\n");
dev_info(&h->pdev->dev, "dump tc\n");
dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
@@ -247,6 +251,9 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qos pri map\n");
dev_info(&h->pdev->dev, "dump qos buf cfg\n");
dev_info(&h->pdev->dev, "dump mng tbl\n");
+ dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
+ dev_info(&h->pdev->dev, "dump mac tnl status\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
@@ -341,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
ret = hns3_dbg_bd_info(handle, cmd_buf);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
if (ret)
hns3_dbg_help(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 162cb9afa0e7..18711e0f9bdf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -35,6 +35,13 @@ static const char hns3_driver_string[] =
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
/* hns3_pci_tbl - PCI Device ID Table
*
* Last entry must be all 0s
@@ -67,7 +74,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
{
struct hns3_enet_tqp_vector *tqp_vector = vector;
- napi_schedule(&tqp_vector->napi);
+ napi_schedule_irqoff(&tqp_vector->napi);
return IRQ_HANDLED;
}
@@ -730,95 +737,6 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
return 0;
}
-static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
-{
- union l3_hdr_info l3;
- union l4_hdr_info l4;
- unsigned char *l2_hdr;
- u8 l4_proto = ol4_proto;
- u32 ol2_len;
- u32 ol3_len;
- u32 ol4_len;
- u32 l2_len;
- u32 l3_len;
-
- l3.hdr = skb_network_header(skb);
- l4.hdr = skb_transport_header(skb);
-
- /* compute L2 header size for normal packet, defined in 2 Bytes */
- l2_len = l3.hdr - skb->data;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
-
- /* tunnel packet*/
- if (skb->encapsulation) {
- /* compute OL2 header size, defined in 2 Bytes */
- ol2_len = l2_len;
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L2LEN_S, ol2_len >> 1);
-
- /* compute OL3 header size, defined in 4 Bytes */
- ol3_len = l4.hdr - l3.hdr;
- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
- ol3_len >> 2);
-
- /* MAC in UDP, MAC in GRE (0x6558)*/
- if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
- /* switch MAC header ptr from outer to inner header.*/
- l2_hdr = skb_inner_mac_header(skb);
-
- /* compute OL4 header size, defined in 4 Bytes. */
- ol4_len = l2_hdr - l4.hdr;
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_L4LEN_S, ol4_len >> 2);
-
- /* switch IP header ptr from outer to inner header */
- l3.hdr = skb_inner_network_header(skb);
-
- /* compute inner l2 header size, defined in 2 Bytes. */
- l2_len = l3.hdr - l2_hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
- l2_len >> 1);
- } else {
- /* skb packet types not supported by hardware,
- * txbd len fild doesn't be filled.
- */
- return;
- }
-
- /* switch L4 header pointer from outer to inner */
- l4.hdr = skb_inner_transport_header(skb);
-
- l4_proto = il4_proto;
- }
-
- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
- l3_len = l4.hdr - l3.hdr;
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
-
- /* compute inner(/normal) L4 header size, defined in 4 Bytes */
- switch (l4_proto) {
- case IPPROTO_TCP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- l4.tcp->doff);
- break;
- case IPPROTO_SCTP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- (sizeof(struct sctphdr) >> 2));
- break;
- case IPPROTO_UDP:
- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
- (sizeof(struct udphdr) >> 2));
- break;
- default:
- /* skb packet types not supported by hardware,
- * txbd len fild doesn't be filled.
- */
- return;
- }
-}
-
/* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
* and it is udp packet, which has a dest port as the IANA assigned.
* the hardware is expected to do the checksum offload, but the
@@ -827,12 +745,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
-#define IANA_VXLAN_PORT 4789
union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
- if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ if (!(!skb->encapsulation &&
+ l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
return false;
skb_checksum_help(skb);
@@ -840,46 +758,71 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
return true;
}
-static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
- u8 il4_proto, u32 *type_cs_vlan_tso,
- u32 *ol_type_vlan_len_msec)
+static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u32 *ol_type_vlan_len_msec)
{
+ u32 l2_len, l3_len, l4_len;
+ unsigned char *il2_hdr;
union l3_hdr_info l3;
- u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
- /* define OL3 type and tunnel type(OL4).*/
- if (skb->encapsulation) {
- /* define outer network header type.*/
- if (skb->protocol == htons(ETH_P_IP)) {
- if (skb_is_gso(skb))
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_CSUM);
- else
- hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV4_NO_CSUM);
-
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
- HNS3_OL3T_IPV6);
- }
+ /* compute OL2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - skb->data;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
- /* define tunnel type(OL4).*/
- switch (l4_proto) {
- case IPPROTO_UDP:
+ /* compute OL3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ il2_hdr = skb_inner_mac_header(skb);
+ /* compute OL4 header size, defined in 4 Bytes. */
+ l4_len = il2_hdr - l4.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
+
+ /* define outer network header type */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb_is_gso(skb))
hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_MAC_IN_UDP);
- break;
- case IPPROTO_GRE:
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
+ else
hns3_set_field(*ol_type_vlan_len_msec,
- HNS3_TXD_TUNTYPE_S,
- HNS3_TUN_NVGRE);
- break;
- default:
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
+ }
+
+ if (ol4_proto == IPPROTO_UDP)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
+ else if (ol4_proto == IPPROTO_GRE)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
+}
+
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+ /* If this is not a UDP/GRE encapsulation skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
/* drop the skb tunnel packet if hardware don't support,
* because hardware can't calculate csum when TSO.
*/
@@ -893,7 +836,12 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
return 0;
}
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
l4_proto = il4_proto;
}
@@ -911,11 +859,22 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
HNS3_L3T_IPV6);
}
+ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ /* compute inner(/normal) L4 header size, defined in 4 Bytes */
switch (l4_proto) {
case IPPROTO_TCP:
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_TCP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ l4.tcp->doff);
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
@@ -924,11 +883,15 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_UDP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
break;
case IPPROTO_SCTP:
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
HNS3_L4T_SCTP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
break;
default:
/* drop the skb tunnel packet if hardware don't support,
@@ -963,6 +926,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13
+ struct hnae3_handle *handle = tx_ring->tqp->handle;
+
+ /* Due to a HW limitation, if port-based VLAN insertion is enabled, only
+ * one VLAN header is allowed in the skb, otherwise it will cause a RAS
+ * error.
+ */
+ if (unlikely(skb_vlan_tagged_multi(skb) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_ENABLE))
+ return -EINVAL;
+
if (skb->protocol == htons(ETH_P_8021Q) &&
!(tx_ring->tqp->handle->kinfo.netdev->features &
NETIF_F_HW_VLAN_CTAG_TX)) {
@@ -984,8 +957,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
* and use inner_vtag in one tag case.
*/
if (skb->protocol == htons(ETH_P_8021Q)) {
- hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
- *out_vtag = vlan_tag;
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE) {
+ hns3_set_field(*out_vlan_flag,
+ HNS3_TXD_OVLAN_B, 1);
+ *out_vtag = vlan_tag;
+ } else {
+ hns3_set_field(*inner_vlan_flag,
+ HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+ }
} else {
hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
*inner_vtag = vlan_tag;
@@ -1012,7 +993,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- u16 bdtp_fe_sc_vld_ra_ri = 0;
struct skb_frag_struct *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
@@ -1042,12 +1022,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
if (unlikely(ret))
return ret;
- hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
- ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
- &type_cs_vlan_tso,
- &ol_type_vlan_len_msec);
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
if (unlikely(ret))
return ret;
@@ -1073,19 +1051,37 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
}
- if (unlikely(dma_mapping_error(ring->dev, dma))) {
+ if (unlikely(dma_mapping_error(dev, dma))) {
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
desc_cb->length = size;
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ ring_ptr_move_fw(ring, next_to_use);
+ return 0;
+ }
+
frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1112,64 +1108,92 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return 0;
}
-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+static int hns3_nic_bd_num(struct sk_buff *skb)
{
- struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- struct skb_frag_struct *frag;
- int bdnum_for_frag;
- int frag_num;
- int buf_num;
- int size;
- int i;
+ int size = skb_headlen(skb);
+ int i, bd_num;
- size = skb_headlen(skb);
- buf_num = hns3_tx_bd_count(size);
+ /* if the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE))
+ return skb_shinfo(skb)->nr_frags + 1;
+
+ bd_num = hns3_tx_bd_count(size);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int frag_bd_num;
- frag_num = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < frag_num; i++) {
- frag = &skb_shinfo(skb)->frags[i];
size = skb_frag_size(frag);
- bdnum_for_frag = hns3_tx_bd_count(size);
- if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
+ frag_bd_num = hns3_tx_bd_count(size);
+
+ if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
return -ENOMEM;
- buf_num += bdnum_for_frag;
+ bd_num += frag_bd_num;
}
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
- return -EBUSY;
- /* manual split the send packet */
- new_skb = skb_copy(skb, GFP_ATOMIC);
- if (!new_skb)
- return -ENOMEM;
- dev_kfree_skb_any(skb);
- *out_skb = new_skb;
- }
+ return bd_num;
+}
- if (unlikely(ring_space(ring) < buf_num))
- return -EBUSY;
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
- *bnum = buf_num;
- return 0;
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
- struct hns3_enet_ring *ring)
+/* HW needs every continuous 8 buffers of data to be larger than MSS;
+ * we simplify it by ensuring skb_headlen + the first continuous
+ * 7 frags to be larger than GSO header len + MSS, and the remaining
+ * continuous 7 frags to be larger than MSS, except the last 7 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb)
+{
+ int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
+ unsigned int tot_len = 0;
+ int i;
+
+ for (i = 0; i < bd_limit; i++)
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ /* ensure headlen + the first 7 frags is greater than mss + header
+ * len, and the first 7 frags alone are greater than mss.
+ */
+ if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
+ hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
+ return true;
+
+ /* ensure the remaining continuous 7 buffer is greater than mss */
+ for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
+ tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
+ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
+
+ if (tot_len < skb_shinfo(skb)->gso_size)
+ return true;
+ }
+
+ return false;
+}
+
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
{
struct sk_buff *skb = *out_skb;
- struct sk_buff *new_skb = NULL;
- int buf_num;
+ int bd_num;
- /* No. of segments (plus a header) */
- buf_num = skb_shinfo(skb)->nr_frags + 1;
+ bd_num = hns3_nic_bd_num(skb);
+ if (bd_num < 0)
+ return bd_num;
- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
- buf_num = hns3_tx_bd_count(skb->len);
- if (ring_space(ring) < buf_num)
+ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ struct sk_buff *new_skb;
+
+ if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
+ goto out;
+
+ bd_num = hns3_tx_bd_count(skb->len);
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
/* manual split the send packet */
new_skb = skb_copy(skb, GFP_ATOMIC);
@@ -1177,14 +1201,17 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
return -ENOMEM;
dev_kfree_skb_any(skb);
*out_skb = new_skb;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_copy++;
+ u64_stats_update_end(&ring->syncp);
}
- if (unlikely(ring_space(ring) < buf_num))
+out:
+ if (unlikely(ring_space(ring) < bd_num))
return -EBUSY;
- *bnum = buf_num;
-
- return 0;
+ return bd_num;
}
static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
@@ -1197,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
if (ring->next_to_use == next_to_use_orig)
break;
+ /* rollback one */
+ ring_ptr_move_bw(ring, next_to_use);
+
/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
dma_unmap_single(dev,
@@ -1210,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
DMA_TO_DEVICE);
ring->desc_cb[ring->next_to_use].length = 0;
-
- /* rollback one */
- ring_ptr_move_bw(ring, next_to_use);
+ ring->desc_cb[ring->next_to_use].dma = 0;
}
}
@@ -1225,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int next_to_use_head;
- int next_to_use_frag;
int buf_num;
int seg_num;
int size;
@@ -1235,22 +1262,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Prefetch the data used later */
prefetch(skb->data);
- switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
- case -EBUSY:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.tx_busy++;
- u64_stats_update_end(&ring->syncp);
+ buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
+ if (unlikely(buf_num <= 0)) {
+ if (buf_num == -EBUSY) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+ goto out_net_tx_busy;
+ } else if (buf_num == -ENOMEM) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ }
- goto out_net_tx_busy;
- case -ENOMEM:
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- netdev_err(netdev, "no memory to xmit!\n");
+ if (net_ratelimit())
+ netdev_err(netdev, "xmit error: %d!\n", buf_num);
goto out_err_tx_ok;
- default:
- break;
}
/* No. of segments (plus a header) */
@@ -1263,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
DESC_TYPE_SKB);
if (unlikely(ret))
- goto head_fill_err;
+ goto fill_err;
- next_to_use_frag = ring->next_to_use;
/* Fill the fragments */
for (i = 1; i < seg_num; i++) {
frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1276,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
DESC_TYPE_PAGE);
if (unlikely(ret))
- goto frag_fill_err;
+ goto fill_err;
}
/* Complete translate all packets */
@@ -1289,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
-frag_fill_err:
- hns3_clear_desc(ring, next_to_use_frag);
-
-head_fill_err:
+fill_err:
hns3_clear_desc(ring, next_to_use_head);
out_err_tx_ok:
@@ -1355,13 +1379,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
bool enable;
int ret;
- if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
- if (features & (NETIF_F_TSO | NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
- }
-
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
enable = !!(features & NETIF_F_GRO_HW);
ret = h->ae_algo->ops->set_gro_en(h, enable);
@@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
@@ -1590,13 +1610,19 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
struct hns3_enet_ring *tx_ring = NULL;
+ struct napi_struct *napi;
int timeout_queue = 0;
int hw_head, hw_tail;
+ int fbd_num, fbd_oft;
+ int ebd_num, ebd_oft;
+ int bd_num, bd_err;
+ int ring_en, tc;
int i;
/* Find the stopped queue the same way the stack does */
- for (i = 0; i < ndev->real_num_tx_queues; i++) {
+ for (i = 0; i < ndev->num_tx_queues; i++) {
struct netdev_queue *q;
unsigned long trans_start;
@@ -1617,21 +1643,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
return false;
}
+ priv->tx_timeout_count++;
+
tx_ring = priv->ring_data[timeout_queue].ring;
+ napi = &tx_ring->tqp_vector->napi;
+
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
+ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
+ tx_ring->next_to_clean, napi->state);
+
+ netdev_info(ndev,
+ "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
+ tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+
+ netdev_info(ndev,
+ "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+	/* When the MAC has received many pause frames continuously, it is
+	 * unable to send packets, which may cause a TX timeout
+	 */
+ if (h->ae_algo->ops->update_stats &&
+ h->ae_algo->ops->get_mac_pause_stats) {
+ u64 tx_pause_cnt, rx_pause_cnt;
+
+ h->ae_algo->ops->update_stats(h, &ndev->stats);
+ h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
+ &rx_pause_cnt);
+ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
+ tx_pause_cnt, rx_pause_cnt);
+ }
hw_head = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_HEAD_REG);
hw_tail = readl_relaxed(tx_ring->tqp->io_base +
HNS3_RING_TX_RING_TAIL_REG);
+ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBDNUM_REG);
+ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBD_OFFSET_REG);
+ bd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ bd_err = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_ERR_REG);
+ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
+ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
+
netdev_info(ndev,
- "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
- priv->tx_timeout_count,
- timeout_queue,
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- hw_head,
- hw_tail,
+		    "BD_NUM: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ bd_num, hw_head, hw_tail, bd_err,
readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+		    "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x, FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
return true;
}
@@ -1644,8 +1715,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
if (!hns3_get_tx_timeo_queue_info(ndev))
return;
- priv->tx_timeout_count++;
-
/* request the reset, and let the hclge to determine
* which reset level should be done
*/
@@ -1670,7 +1739,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
};
-static bool hns3_is_phys_func(struct pci_dev *pdev)
+bool hns3_is_phys_func(struct pci_dev *pdev)
{
u32 dev_id = pdev->device;
@@ -2117,17 +2186,30 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].rx.bd_base_info = 0;
}
-static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
- int *pkts)
+static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
+ int *bytes, int *pkts)
{
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
- (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
- (*bytes) += desc_cb->length;
- /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ while (head != ntc) {
+ desc_cb = &ring->desc_cb[ntc];
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
+ hns3_free_buffer_detach(ring, ntc);
- ring_ptr_move_fw(ring, next_to_clean);
+ if (++ntc == ring->desc_num)
+ ntc = 0;
+
+ /* Issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ntc]);
+ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+ */
+ smp_store_release(&ring->next_to_clean, ntc);
}
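
The ordering here is the point of the rewrite, so a brief sketch of the intended pairing may help. This is only a schematic of the two paths involved (the tx-clean path above and ring_space() on the xmit path), not additional driver code.

/*
 *   tx clean (consumer)                       xmit (producer)
 *   -------------------                       ---------------
 *   free buffers in [next_to_clean, head)
 *   smp_store_release(&next_to_clean, head)   begin = smp_load_acquire(&next_to_clean)
 *                                             if (ring_space() >= bd_num)
 *                                                     reuse the freed descriptors
 *
 * The acquire on the producer side guarantees it only observes the advanced
 * next_to_clean after the buffer frees that preceded the release are visible.
 */
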
static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
@@ -2167,11 +2249,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
bytes = 0;
pkts = 0;
- while (head != ring->next_to_clean) {
- hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
- /* Issue prefetch for next Tx descriptor */
- prefetch(&ring->desc_cb[ring->next_to_clean]);
- }
+ hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
ring->tqp_vector->tx_group.total_bytes += bytes;
ring->tqp_vector->tx_group.total_packets += pkts;
@@ -2233,6 +2311,10 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
break;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_reuse_pg++;
+ u64_stats_update_end(&ring->syncp);
}
ring_ptr_move_fw(ring, next_to_use);
@@ -2246,64 +2328,78 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
{
- struct hns3_desc *desc;
- u32 truesize;
- int size;
- int last_offset;
- bool twobufs;
-
- twobufs = ((PAGE_SIZE < 8192) &&
- hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
-
- desc = &ring->desc[ring->next_to_clean];
- size = le16_to_cpu(desc->rx.size);
-
- truesize = hnae3_buf_size(ring);
-
- if (!twobufs)
- last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ int size = le16_to_cpu(desc->rx.size);
+ u32 truesize = hnae3_buf_size(ring);
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
- /* Avoid re-using remote pages,flag default unreuse */
- if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
- return;
-
- if (twobufs) {
- /* If we are only owner of page we can reuse it */
- if (likely(page_count(desc_cb->priv) == 1)) {
- /* Flip page offset to other buffer */
- desc_cb->page_offset ^= truesize;
-
- desc_cb->reuse_flag = 1;
- /* bump ref count on page before it is given*/
- get_page(desc_cb->priv);
- }
+	/* Avoid re-using remote pages, or pages the stack is still using
+	 * when page_offset has rolled back to zero; default to not reusing
+	 */
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
+ (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
return;
- }
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
- if (desc_cb->page_offset <= last_offset) {
+ if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
desc_cb->reuse_flag = 1;
/* Bump ref count on page before it is given*/
get_page(desc_cb->priv);
+ } else if (page_count(desc_cb->priv) == 1) {
+ desc_cb->reuse_flag = 1;
+ desc_cb->page_offset = 0;
+ get_page(desc_cb->priv);
}
}
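
For illustration only (assuming a 4K page split into two 2K buffers, one common configuration), the reuse decision above walks a page like this:

/*
 *   BD 1: page_offset 0    -> 2048 still fits the page, reuse_flag = 1,
 *         page_offset becomes 2048
 *   BD 2: page_offset 2048 -> 4096 would run past the page, so reuse only
 *         if page_count() == 1 (the stack has released it); page_offset
 *         wraps back to 0
 *
 * A page on a remote NUMA node, or one at offset 0 that the stack still
 * holds (page_count() > 1), is never reused.
 */
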
+static int hns3_gro_complete(struct sk_buff *skb)
+{
+ __be16 type = skb->protocol;
+ struct tcphdr *th;
+ int depth = 0;
+
+ while (type == htons(ETH_P_8021Q)) {
+ struct vlan_hdr *vh;
+
+ if ((depth + VLAN_HLEN) > skb_headlen(skb))
+ return -EFAULT;
+
+ vh = (struct vlan_hdr *)(skb->data + depth);
+ type = vh->h_vlan_encapsulated_proto;
+ depth += VLAN_HLEN;
+ }
+
+ if (type == htons(ETH_P_IP)) {
+ depth += sizeof(struct iphdr);
+ } else if (type == htons(ETH_P_IPV6)) {
+ depth += sizeof(struct ipv6hdr);
+ } else {
+ netdev_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
+ return -EFAULT;
+ }
+
+ th = (struct tcphdr *)(skb->data + depth);
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return 0;
+}
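
Two hypothetical packets show where the header walk above lands (assuming no IP options or extension headers; skb->data already points past the Ethernet header here):

/*
 *   untagged IPv4 TCP:      depth = sizeof(struct iphdr)               = 20
 *   802.1Q-tagged IPv6 TCP: depth = VLAN_HLEN + sizeof(struct ipv6hdr) = 44
 *
 * th then points at the TCP header, gso_segs is taken from the GRO count
 * the hardware reported, and a set CWR bit marks the skb as SKB_GSO_TCP_ECN.
 */
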
+
static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
- struct hns3_desc *desc)
+ u32 l234info, u32 bd_base_info, u32 ol_info)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int l3_type, l4_type;
- u32 bd_base_info;
int ol4_type;
- u32 l234info;
-
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- l234info = le32_to_cpu(desc->rx.l234_info);
skb->ip_summed = CHECKSUM_NONE;
@@ -2312,12 +2408,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
- /* We MUST enable hardware checksum before enabling hardware GRO */
- if (skb_shinfo(skb)->gso_size) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- }
-
/* check if hardware has done checksum */
if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
return;
@@ -2332,7 +2422,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
return;
}
- ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
+ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
HNS3_RXD_OL4ID_S);
switch (ol4_type) {
case HNS3_OL4_TYPE_MAC_IN_UDP:
@@ -2370,6 +2460,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
struct hns3_desc *desc, u32 l234info,
u16 *vlan_tag)
{
+ struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
if (pdev->revision == 0x20) {
@@ -2382,15 +2473,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
#define HNS3_STRP_OUTER_VLAN 0x1
#define HNS3_STRP_INNER_VLAN 0x2
+#define HNS3_STRP_BOTH 0x3
+	/* Hardware always inserts the VLAN tag into the RX descriptor when
+	 * it strips the tag from the packet; the driver needs to determine
+	 * which tag to report to the stack.
+	 */
switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
HNS3_RXD_STRP_TAGP_S)) {
case HNS3_STRP_OUTER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
return true;
case HNS3_STRP_INNER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
return true;
+ case HNS3_STRP_BOTH:
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ else
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return true;
default:
return false;
}
@@ -2437,7 +2549,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
- ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
@@ -2512,8 +2624,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
return 0;
}
-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
- u32 bd_base_info)
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info, u32 ol_info)
{
u16 gro_count;
u32 l3_type;
@@ -2521,12 +2634,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
- if (!gro_count)
- return;
+ if (!gro_count) {
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
+ return 0;
+ }
- /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
- * to skb_shinfo(skb)->gso_segs
- */
NAPI_GRO_CB(skb)->count = gro_count;
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
@@ -2536,47 +2648,121 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
else if (l3_type == HNS3_L3_TYPE_IPV6)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
else
- return;
+ return -EFAULT;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
- if (skb_shinfo(skb)->gso_size)
- tcp_gro_complete(skb);
+
+ return hns3_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
- struct sk_buff *skb)
+ struct sk_buff *skb, u32 rss_hash)
{
struct hnae3_handle *handle = ring->tqp->handle;
enum pkt_hash_types rss_type;
- struct hns3_desc *desc;
- int last_bd;
-
- /* When driver handle the rss type, ring->next_to_clean indicates the
- * first descriptor of next packet, need -1 here.
- */
- last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
- desc = &ring->desc[last_bd];
- if (le32_to_cpu(desc->rx.rss_hash))
+ if (rss_hash)
rss_type = handle->kinfo.rss_type;
else
rss_type = PKT_HASH_TYPE_NONE;
- skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
+ skb_set_hash(skb, rss_hash, rss_type);
}
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb)
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
enum hns3_pkt_l2t_type l2_frame_type;
+ u32 bd_base_info, l234info, ol_info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+
+	/* The bdinfo handled below is only valid on the last BD of the
+	 * current packet, and ring->next_to_clean indicates the first
+	 * descriptor of the next packet, hence the - 1 below.
+	 */
+ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
+ (ring->desc_num - 1);
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
+ ol_info = le32_to_cpu(desc->rx.ol_info);
+
+	/* Based on the hw strategy, the offloaded tag is stored in
+	 * ot_vlan_tag in the two-layer tag case, and in vlan_tag in
+	 * the one-layer tag case.
+	 */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_vld_descs++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EINVAL;
+ }
+
+ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
+ u64_stats_update_begin(&ring->syncp);
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EFAULT;
+ }
+
+ len = skb->len;
+
+ /* Do update ip stack process */
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
+ bd_base_info, ol_info);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += len;
+
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
+
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ return 0;
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
+{
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
u32 bd_base_info;
- u32 l234info;
int length;
int ret;
@@ -2636,64 +2822,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ALIGN(ring->pull_len, sizeof(long)));
}
- l234info = le32_to_cpu(desc->rx.l234_info);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
-
- /* Based on hw strategy, the tag offloaded will be stored at
- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
- * in one layer tag case.
- */
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
- u16 vlan_tag;
-
- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
- __vlan_hwaccel_put_tag(skb,
- htons(ETH_P_8021Q),
- vlan_tag);
- }
-
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.non_vld_descs++;
- u64_stats_update_end(&ring->syncp);
-
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- if (unlikely((!desc->rx.pkt_len) ||
- (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
- BIT(HNS3_RXD_L2E_B))))) {
- u64_stats_update_begin(&ring->syncp);
- if (l234info & BIT(HNS3_RXD_L2E_B))
- ring->stats.l2_err++;
- else
- ring->stats.err_pkt_len++;
- u64_stats_update_end(&ring->syncp);
-
+ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
dev_kfree_skb_any(skb);
- return -EFAULT;
+ return ret;
}
-
- l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
- HNS3_RXD_DMAC_S);
- u64_stats_update_begin(&ring->syncp);
- if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
- ring->stats.rx_multicast++;
-
- ring->stats.rx_pkts++;
- ring->stats.rx_bytes += skb->len;
- u64_stats_update_end(&ring->syncp);
-
- ring->tqp_vector->rx_group.total_bytes += skb->len;
-
- /* This is needed in order to enable forwarding support */
- hns3_set_gro_param(skb, l234info, bd_base_info);
-
- hns3_rx_checksum(ring, skb, desc);
*out_skb = skb;
- hns3_set_rx_skb_rss_type(ring, skb);
return 0;
}
@@ -2703,9 +2838,8 @@ int hns3_clean_rx_ring(
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = ring->skb;
int num;
@@ -2714,6 +2848,7 @@ int hns3_clean_rx_ring(
recv_pkts = 0, recv_bds = 0, clean_count = 0;
num -= unused_count;
+ unused_count -= ring->pending_buf;
while (recv_pkts < budget && recv_bds < num) {
/* Reuse or realloc buffers */
@@ -2740,8 +2875,6 @@ int hns3_clean_rx_ring(
continue;
}
- /* Do update ip stack process */
- skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
@@ -2891,7 +3024,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
struct hns3_enet_tqp_vector *tqp_vector =
container_of(napi, struct hns3_enet_tqp_vector, napi);
bool clean_complete = true;
- int rx_budget;
+ int rx_budget = budget;
if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
napi_complete(napi);
@@ -2905,7 +3038,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
hns3_clean_tx_ring(ring);
/* make sure rx ring budget not smaller than 1 */
- rx_budget = max(budget / tqp_vector->num_tqps, 1);
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
hns3_for_each_ring(ring, tqp_vector->rx_group) {
int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
@@ -3316,6 +3450,7 @@ err:
}
devm_kfree(&pdev->dev, priv->ring_data);
+ priv->ring_data = NULL;
return ret;
}
@@ -3324,12 +3459,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
struct hnae3_handle *h = priv->ae_handle;
int i;
+ if (!priv->ring_data)
+ return;
+
for (i = 0; i < h->kinfo.num_tqps; i++) {
devm_kfree(priv->dev, priv->ring_data[i].ring);
devm_kfree(priv->dev,
priv->ring_data[i + h->kinfo.num_tqps].ring);
}
devm_kfree(priv->dev, priv->ring_data);
+ priv->ring_data = NULL;
}
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
@@ -3339,8 +3478,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
if (ring->desc_num <= 0 || ring->buf_size <= 0)
return -EINVAL;
- ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
- GFP_KERNEL);
+ ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+ sizeof(ring->desc_cb[0]), GFP_KERNEL);
if (!ring->desc_cb) {
ret = -ENOMEM;
goto out;
@@ -3361,7 +3500,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
out_with_desc:
hns3_free_desc(ring);
out_with_desc_cb:
- kfree(ring->desc_cb);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring->desc_cb = NULL;
out:
return ret;
@@ -3370,7 +3509,7 @@ out:
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
hns3_free_desc(ring);
- kfree(ring->desc_cb);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
@@ -3557,17 +3696,6 @@ static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
h->ae_algo->ops->del_all_fd_entries(h, clear_list);
}
-static void hns3_nic_set_priv_ops(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
-
- if ((netdev->features & NETIF_F_TSO) ||
- (netdev->features & NETIF_F_TSO6))
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
- else
- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-}
-
static int hns3_client_start(struct hnae3_handle *handle)
{
if (!handle->ae_algo->ops->client_start)
@@ -3584,6 +3712,21 @@ static void hns3_client_stop(struct hnae3_handle *handle)
handle->ae_algo->ops->client_stop(handle);
}
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+
+ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+	dev_info(priv->dev, "Task queue pairs: %d\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3605,6 +3748,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->tx_timeout_count = 0;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
handle->kinfo.netdev = netdev;
handle->priv = (void *)priv;
@@ -3617,7 +3762,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
netdev->netdev_ops = &hns3_nic_netdev_ops;
SET_NETDEV_DEV(netdev, &pdev->dev);
hns3_ethtool_set_ops(netdev);
- hns3_nic_set_priv_ops(netdev);
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -3671,6 +3815,9 @@ static int hns3_client_init(struct hnae3_handle *handle)
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
return ret;
out_client_start:
@@ -3697,13 +3844,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- hns3_client_stop(handle);
-
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_client_stop(handle);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
goto out_netdev_free;
@@ -3729,8 +3876,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_dbg_uninit(handle);
- priv->ring_data = NULL;
-
out_netdev_free:
free_netdev(netdev);
}
@@ -3745,11 +3890,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
} else {
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
- netdev_info(netdev, "link down\n");
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
}
}
@@ -3773,12 +3920,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
struct netdev_hw_addr *ha, *tmp;
int ret = 0;
+ netif_addr_lock_bh(ndev);
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_uc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
/* go through and sync mc_addr entries to the device */
@@ -3786,9 +3934,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_mc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
+out:
+ netif_addr_unlock_bh(ndev);
return ret;
}
@@ -3799,6 +3949,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+ netif_addr_lock_bh(netdev);
/* go through and unsync uc_addr entries to the device */
list = &netdev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list)
@@ -3809,6 +3960,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
list_for_each_entry_safe(ha, tmp, &list->list, list)
if (ha->refcount > 1)
hns3_nic_mc_unsync(netdev, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
@@ -3850,6 +4003,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
ring_ptr_move_fw(ring, next_to_use);
}
+ /* Free the pending skb in rx ring */
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
return 0;
}
@@ -4048,18 +4208,24 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_uninit_vector;
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto err_uninit_ring;
+ }
+
set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
+err_uninit_ring:
+ hns3_uninit_all_ring(priv);
err_uninit_vector:
hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
err_dealloc_vector:
hns3_nic_dealloc_vector_data(priv);
err_put_ring:
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
return ret;
}
@@ -4101,7 +4267,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
}
@@ -4121,9 +4287,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
netdev_err(netdev, "uninit ring error\n");
hns3_put_ring_config(priv);
- priv->ring_data = NULL;
-
- clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 75669cd0c311..c14480f9b625 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -42,8 +42,10 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
+#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
-
+#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
+#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
@@ -374,6 +376,7 @@ struct ring_stats {
u64 tx_err_cnt;
u64 restart_queue;
u64 tx_busy;
+ u64 tx_copy;
};
struct {
u64 rx_pkts;
@@ -386,6 +389,7 @@ struct ring_stats {
u64 l2_err;
u64 l3l4_csum_err;
u64 rx_multicast;
+ u64 non_reuse_pg;
};
};
};
@@ -397,7 +401,6 @@ struct hns3_enet_ring {
struct hns3_enet_ring *next;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_queue *tqp;
- char ring_name[HNS3_RING_NAME_LEN];
struct device *dev; /* will be used for DMA mapping of descriptors */
/* statistic */
@@ -407,9 +410,6 @@ struct hns3_enet_ring {
dma_addr_t desc_dma_addr;
u32 buf_size; /* size for hnae_desc->addr, preset by AE */
u16 desc_num; /* total number of desc */
- u16 max_desc_num_per_pkt;
- u16 max_raw_data_sz_per_desc;
- u16 max_pkt_size;
int next_to_use; /* idx of next spare desc */
/* idx of lastest sent desc, the ring is empty when equal to
@@ -423,9 +423,6 @@ struct hns3_enet_ring {
u32 flag; /* ring attribute */
- int numa_node;
- cpumask_t affinity_mask;
-
int pending_buf;
struct sk_buff *skb;
struct sk_buff *tail_skb;
@@ -442,11 +439,6 @@ struct hns3_nic_ring_data {
void (*fini_process)(struct hns3_nic_ring_data *);
};
-struct hns3_nic_ops {
- int (*maybe_stop_tx)(struct sk_buff **out_skb,
- int *bnum, struct hns3_enet_ring *ring);
-};
-
enum hns3_flow_level_range {
HNS3_FLOW_LOW = 0,
HNS3_FLOW_MID = 1,
@@ -536,7 +528,6 @@ struct hns3_nic_priv {
u32 port_id;
struct net_device *netdev;
struct device *dev;
- struct hns3_nic_ops ops;
/**
* the cb for nic to manage the ring buffer, the first half of the
@@ -577,18 +568,16 @@ union l4_hdr_info {
unsigned char *hdr;
};
-/* the distance between [begin, end) in a ring buffer
- * note: there is a unuse slot between the begin and the end
- */
-static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
-{
- return (end - begin + ring->desc_num) % ring->desc_num;
-}
-
static inline int ring_space(struct hns3_enet_ring *ring)
{
- return ring->desc_num -
- ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+ /* This smp_load_acquire() pairs with smp_store_release() in
+	 * hns3_nic_reclaim_desc called by hns3_clean_tx_ring.
+ */
+ int begin = smp_load_acquire(&ring->next_to_clean);
+ int end = READ_ONCE(ring->next_to_use);
+
+ return ((end >= begin) ? (ring->desc_num - end + begin) :
+ (begin - end)) - 1;
}
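
A quick check of the new formula with made-up indices (desc_num = 512):

/*
 *   next_to_clean = 10,  next_to_use = 500:  end >= begin,
 *           space = 512 - 500 + 10 - 1 = 21
 *   next_to_clean = 300, next_to_use = 20:   end <  begin,
 *           space = 300 - 20 - 1       = 279
 *
 * The trailing -1 keeps one descriptor unused so a full ring and an
 * empty ring can still be told apart without a separate counter.
 */
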
static inline int is_ring_empty(struct hns3_enet_ring *ring)
@@ -633,7 +622,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
(tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
+#define ring_to_dev(ring) ((ring)->dev)
#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
DMA_TO_DEVICE : DMA_FROM_DEVICE)
@@ -666,6 +655,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool hns3_is_phys_func(struct pci_dev *pdev);
int hns3_clean_rx_ring(
struct hns3_enet_ring *ring, int budget,
void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 359d4731fb2d..d1588ea6132c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -29,6 +29,7 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("errors", tx_err_cnt),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
+ HNS3_TQP_STAT("copy", tx_copy),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -48,6 +49,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("l2_err", l2_err),
HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
HNS3_TQP_STAT("multicast", rx_multicast),
+ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
};
#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
@@ -483,6 +485,11 @@ static void hns3_get_stats(struct net_device *netdev,
struct hnae3_handle *h = hns3_get_handle(netdev);
u64 *p = data;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting, could not get stats\n");
+ return;
+ }
+
if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
netdev_err(netdev, "could not get any statistics\n");
return;
@@ -599,6 +606,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
+ u8 module_type;
u8 media_type;
u8 link_stat;
@@ -607,7 +615,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
ops = h->ae_algo->ops;
if (ops->get_media_type)
- ops->get_media_type(h, &media_type);
+ ops->get_media_type(h, &media_type, &module_type);
else
return -EOPNOTSUPP;
@@ -617,7 +625,15 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- cmd->base.port = PORT_FIBRE;
+ if (module_type == HNAE3_MODULE_TYPE_CR)
+ cmd->base.port = PORT_DA;
+ else
+ cmd->base.port = PORT_FIBRE;
+
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_BACKPLANE:
+ cmd->base.port = PORT_NONE;
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_COPPER:
@@ -645,14 +661,79 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
return 0;
}
+static int hns3_check_ksettings_param(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
+ u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u8 autoneg;
+ u32 speed;
+ u8 duplex;
+ int ret;
+
+ if (ops->get_ksettings_an_result) {
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
+ cmd->base.duplex == duplex)
+ return 0;
+ }
+
+ if (ops->get_media_type)
+ ops->get_media_type(handle, &media_type, &module_type);
+
+ if (cmd->base.duplex != DUPLEX_FULL &&
+ media_type != HNAE3_MEDIA_TYPE_COPPER) {
+ netdev_err(netdev,
+			   "only a copper port supports half duplex!\n");
+ return -EINVAL;
+ }
+
+ if (ops->check_port_speed) {
+ ret = ops->check_port_speed(handle, cmd->base.speed);
+ if (ret) {
+ netdev_err(netdev, "unsupported speed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int ret = 0;
+
+	/* The chip doesn't support this mode. */
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
- return -EOPNOTSUPP;
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ ret = hns3_check_ksettings_param(netdev, cmd);
+ if (ret)
+ return ret;
+
+ if (ops->set_autoneg) {
+ ret = ops->set_autoneg(handle, cmd->base.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ if (ops->cfg_mac_speed_dup_h)
+ ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
+ cmd->base.duplex);
+
+ return ret;
}
static u32 hns3_get_rss_key_size(struct net_device *netdev)
@@ -857,19 +938,36 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
static int hns3_nway_reset(struct net_device *netdev)
{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
struct phy_device *phy = netdev->phydev;
+ int autoneg;
if (!netif_running(netdev))
return 0;
- /* Only support nway_reset for netdev with phy attached for now */
- if (!phy)
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return -EBUSY;
+ }
+
+ if (!ops->get_autoneg || !ops->restart_autoneg)
return -EOPNOTSUPP;
- if (phy->autoneg != AUTONEG_ENABLE)
+ autoneg = ops->get_autoneg(handle);
+ if (autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev,
+			   "Autoneg is off, restarting it is not supported\n");
return -EINVAL;
+ }
+
+ if (phy)
+ return genphy_restart_aneg(phy);
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
- return genphy_restart_aneg(phy);
+ return ops->restart_autoneg(handle);
}
static void hns3_get_channels(struct net_device *netdev,
@@ -1101,6 +1199,95 @@ static int hns3_set_phys_id(struct net_device *netdev,
return h->ae_algo->ops->set_led_id(h, state);
}
+static u32 hns3_get_msglevel(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->msg_enable;
+}
+
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ h->msg_enable = msg_level;
+}
+
+/* Translate local fec value into ethtool value. */
+static unsigned int loc_to_eth_fec(u8 loc_fec)
+{
+ u32 eth_fec = 0;
+
+ if (loc_fec & BIT(HNAE3_FEC_AUTO))
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (loc_fec & BIT(HNAE3_FEC_RS))
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_BASER))
+ eth_fec |= ETHTOOL_FEC_BASER;
+
+ /* if nothing is set, then FEC is off */
+ if (!eth_fec)
+ eth_fec = ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+/* Translate ethtool fec value into local value. */
+static unsigned int eth_to_loc_fec(unsigned int eth_fec)
+{
+ u32 loc_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ return loc_fec;
+
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ loc_fec |= BIT(HNAE3_FEC_AUTO);
+ if (eth_fec & ETHTOOL_FEC_RS)
+ loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ loc_fec |= BIT(HNAE3_FEC_BASER);
+
+ return loc_fec;
+}
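
A couple of example translations, using only the bits defined above and the standard ETHTOOL_FEC_* flags (the values are just to show the mapping):

/*
 *   loc_to_eth_fec(BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_BASER))
 *           == ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER
 *   loc_to_eth_fec(0) == ETHTOOL_FEC_OFF       (nothing set means FEC off)
 *   eth_to_loc_fec(ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS) == 0
 *           (the early return makes OFF win over any other requested mode)
 */
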
+
+static int hns3_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 fec_ability;
+ u8 fec_mode;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (!ops->get_fec)
+ return -EOPNOTSUPP;
+
+ ops->get_fec(handle, &fec_ability, &fec_mode);
+
+ fec->fec = loc_to_eth_fec(fec_ability);
+ fec->active_fec = loc_to_eth_fec(fec_mode);
+
+ return 0;
+}
+
+static int hns3_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u32 fec_mode;
+
+ if (handle->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ if (!ops->set_fec)
+ return -EOPNOTSUPP;
+ fec_mode = eth_to_loc_fec(fec->fec);
+ return ops->set_fec(handle, fec_mode);
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_ringparam = hns3_get_ringparam,
@@ -1121,6 +1308,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.get_link = hns3_get_link,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1150,6 +1339,10 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_regs_len = hns3_get_regs_len,
.get_regs = hns3_get_regs,
.set_phys_id = hns3_set_phys_id,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+ .get_fecparam = hns3_get_fecparam,
+ .set_fecparam = hns3_set_fecparam,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 3a093a92eac5..fbd904e3077c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -355,7 +355,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock_bh(&hdev->hw.cmq.crq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
@@ -364,7 +364,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hclge_cmd_init_regs(&hdev->hw);
- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
@@ -373,21 +373,26 @@ int hclge_cmd_init(struct hclge_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if ((hclge_is_reset_pending(hdev))) {
- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err_cmd_init;
}
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"firmware version query failed %d\n", ret);
- return ret;
+ goto err_cmd_init;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
}
static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
@@ -411,7 +416,7 @@ static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
spin_unlock(&ring->lock);
}
-void hclge_destroy_cmd_queue(struct hclge_hw *hw)
+static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
hclge_destroy_queue(&hw->cmq.csq);
hclge_destroy_queue(&hw->cmq.crq);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 3714733c96d9..d79a209b80f6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -109,7 +109,11 @@ enum hclge_opcode_type {
HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
@@ -237,8 +241,11 @@ enum hclge_opcode_type {
/* Led command */
HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
/* SFP command */
- HCLGE_OPC_SFP_GET_SPEED = 0x7104,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
/* Error INT commands */
HCLGE_MAC_COMMON_INT_EN = 0x030E,
@@ -593,9 +600,30 @@ struct hclge_config_auto_neg_cmd {
u8 rsv[20];
};
-struct hclge_sfp_speed_cmd {
- __le32 sfp_speed;
- u32 rsv[5];
+struct hclge_sfp_info_cmd {
+ __le32 speed;
+ u8 query_type; /* 0: sfp speed, 1: active speed */
+ u8 active_fec;
+ u8 autoneg; /* autoneg state */
+	u8 autoneg_ability; /* whether autoneg is supported */
+ __le32 speed_ability; /* speed ability for current media */
+ __le32 module_type;
+ u8 rsv[8];
+};
+
+#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
+#define HCLGE_MAC_CFG_FEC_MODE_S 1
+#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1)
+#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0
+#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1
+
+#define HCLGE_MAC_FEC_OFF 0
+#define HCLGE_MAC_FEC_BASER 1
+#define HCLGE_MAC_FEC_RS 2
+struct hclge_config_fec_cmd {
+ u8 fec_mode;
+ u8 default_config;
+ u8 rsv[22];
};
#define HCLGE_MAC_UPLINK_PORT 0x100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 1192cf6f2321..a9ffb57c4607 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -901,6 +901,109 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
}
}
+static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "PF reset count: %d\n",
+ hdev->rst_stats.pf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %d\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "CORE reset count: %d\n",
+ hdev->rst_stats.core_rst_cnt);
+ dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n",
+ hdev->rst_stats.global_rst_cnt);
+ dev_info(&hdev->pdev->dev, "IMP reset count: %d\n",
+ hdev->rst_stats.imp_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %d\n",
+ hdev->rst_stats.reset_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %d\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %d\n",
+ hdev->rst_stats.reset_cnt);
+}
+
+/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
+ * @hdev: pointer to struct hclge_dev
+ * @cmd_buf: string that contains offset and length
+ */
+static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
+{
+#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
+#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
+#define HCLGE_CMD_DATA_NUM 6
+
+ struct hclge_desc desc[5];
+ u32 byte_offset;
+ int bd_num = 5;
+ int offset;
+ int length;
+ int data0;
+ int ret;
+ int i;
+ int j;
+
+ ret = sscanf(cmd_buf, "%x %x", &offset, &length);
+ if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
+ return;
+ }
+ if (offset < 0 || length <= 0) {
+ dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
+ return;
+ }
+
+ dev_info(&hdev->pdev->dev, "offset | data\n");
+
+ while (length > 0) {
+ data0 = offset;
+ if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
+ data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
+ else
+ data0 |= length << 16;
+ ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
+ HCLGE_OPC_QUERY_NCL_CONFIG);
+ if (ret)
+ return;
+
+ byte_offset = offset;
+ for (i = 0; i < bd_num; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ byte_offset,
+ le32_to_cpu(desc[i].data[j]));
+ byte_offset += sizeof(u32);
+ length -= sizeof(u32);
+ if (length <= 0)
+ return;
+ }
+ }
+ offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
+ }
+}
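
A usage sketch for the parser above (the debugfs file it is wired to is not shown in this hunk, so only the command string format is illustrated):

/*
 *   "dump ncl_config 0x0 0x30"
 *           -> offset 0x0, length 0x30 bytes, printed one u32 per line as
 *              "0x%04x | 0x%08x"; requests longer than
 *              HCLGE_MAX_NCL_CONFIG_LENGTH (116) bytes are split across
 *              several firmware queries.
 */
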
+
+/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
+ * @hdev: pointer to struct hclge_dev
+ */
+static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
+{
+#define HCLGE_BILLION_NANO_SECONDS 1000000000
+
+ struct hclge_mac_tnl_stats stats;
+ unsigned long rem_nsec;
+
+	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interrupts:\n");
+
+ while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
+ rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
+ dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
+ }
+}
+
int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -924,6 +1027,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
hclge_dbg_dump_mng_table(hdev);
} else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
+ hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
+ hclge_dbg_dump_ncl_config(hdev,
+ &cmd_buf[sizeof("dump ncl_config")]);
+ } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
+ hclge_dbg_dump_mac_tnl_status(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 1f52d11f77b5..4ac80634c984 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -4,287 +4,468 @@
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
- { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
- { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
- { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
- { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
- { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
- { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
- { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
+ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
- { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_int[] = {
- { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
+ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
- { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
- { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
- { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
- { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
- { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
+ { .int_msk = BIT(0), .msg = "rx_buf_overflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(3), .msg = "tx_buf_overflow",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(4), .msg = "tx_buf_underrun",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
+ .reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ncsi_err_int[] = {
- { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
- { .int_msk = BIT(27),
- .msg = "flow_director_ad_mem0_ecc_mbit_err" },
- { .int_msk = BIT(28),
- .msg = "flow_director_ad_mem1_ecc_mbit_err" },
- { .int_msk = BIT(29),
- .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
- { .int_msk = BIT(30),
- .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
- { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
+ { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_tm_sch_rint[] = {
- { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
- { .int_msk = BIT(12),
- .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
- { .int_msk = BIT(13),
- .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
- { .int_msk = BIT(14),
- .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
- { .int_msk = BIT(15),
- .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
- { .int_msk = BIT(16),
- .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
- { .int_msk = BIT(17),
- .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
- { .int_msk = BIT(18),
- .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
- { .int_msk = BIT(19),
- .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
- { .int_msk = BIT(20),
- .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
- { .int_msk = BIT(21),
- .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
+ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
- { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
- { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
- { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
- { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
- { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
- { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
- { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
- { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
- { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
- { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
- { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
- { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
- { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
- { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
- { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
- { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
- { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
- { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
+ { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
- { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
- { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
- { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
- { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
- { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
- { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
- { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
- { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
- { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
- { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
- { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
- { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
- { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
- { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
- { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
- { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
- { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
- { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
+ { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
- { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
- { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
- { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
- { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
- { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
- { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
- { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
- { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
- { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
- { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
- { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
- { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
- { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
- { .int_msk = BIT(26), .msg = "rd_bus_err" },
- { .int_msk = BIT(27), .msg = "wr_bus_err" },
- { .int_msk = BIT(28), .msg = "reg_search_miss" },
- { .int_msk = BIT(29), .msg = "rx_q_search_miss" },
- { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
- { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
+ { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "rd_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "wr_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "reg_search_miss",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "rx_q_search_miss",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
- { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
- { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
- { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
- { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
+ { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
+ { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_CORE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
- { .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
- { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
- { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
- { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
- { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
- { .int_msk = BIT(5), .msg = "buf_wait_timeout" },
+ { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(5), .msg = "buf_wait_timeout",
+ .reset_level = HNAE3_NONE_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
- { .int_msk = BIT(0), .msg = "buf_sum_err" },
- { .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
- { .int_msk = BIT(2), .msg = "ppp_mbid_err" },
- { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
- { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
- { .int_msk = BIT(5), .msg = "cks_edit_position_err" },
- { .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
- { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
- { .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
- { .int_msk = BIT(9), .msg = "vlan_num_in_err" },
+ { .int_msk = BIT(0), .msg = "buf_sum_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(2), .msg = "ppp_mbid_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "cks_edit_position_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "vlan_num_in_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
#define HCLGE_SSU_MEM_ECC_ERR(x) \
- { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
+ { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
+ .reset_level = HNAE3_GLOBAL_RESET }
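For clarity, each use of this helper macro expands, via stringification of its argument and string-literal concatenation, into an ordinary table entry; HCLGE_SSU_MEM_ECC_ERR(0), for example, is equivalent to:

	{ .int_msk = BIT(0), .msg = "ssu_mem0_ecc_mbit_err",
	  .reset_level = HNAE3_GLOBAL_RESET }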
static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
HCLGE_SSU_MEM_ECC_ERR(0),
@@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
};
static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
- { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
- { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
- { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
- { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
- { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
- { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
- { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
- { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
- { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
- { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
- { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
- { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
- { .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
- { .int_msk = BIT(1), .msg = "ig_host_inf_int" },
- { .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
- { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
- { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
- { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
- { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
- { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
- { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
- { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
- { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
- { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
- { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
- { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
- { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
- { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
- { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
- { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
- { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
- { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
- { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
- { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
- { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
- { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
+ { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "ig_host_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
- { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
- { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
- { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
- { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
+ { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
- { .int_msk = BIT(9), .msg = "low_water_line_err_port" },
- { .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "low_water_line_err_port",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
{ /* sentinel */ }
};
@@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
-static void hclge_log_error(struct device *dev, char *reg,
- const struct hclge_hw_error *err,
- u32 err_sts)
+static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
+ u32 err_sts)
{
+ enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
+ bool need_reset = false;
+
while (err->msg) {
- if (err->int_msk & err_sts)
+ if (err->int_msk & err_sts) {
dev_warn(dev, "%s %s found [error status=0x%x]\n",
reg, err->msg, err_sts);
+ if (err->reset_level != HNAE3_NONE_RESET &&
+ err->reset_level >= reset_level) {
+ reset_level = err->reset_level;
+ need_reset = true;
+ }
+ }
err++;
}
+ if (need_reset)
+ return reset_level;
+ else
+ return HNAE3_NONE_RESET;
}
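The reworked helper now both logs every set error bit and reports how severe a reset those bits call for. Below is a minimal standalone sketch of that escalation rule, using simplified stand-in types rather than the driver's, and assuming (as the ">=" comparison above implies) that the reset enum is ordered by severity:

#include <stdio.h>
#include <stdint.h>

/* simplified stand-ins for the driver's types (hypothetical) */
enum reset_level { NONE_RESET, FUNC_RESET, GLOBAL_RESET };

struct hw_error {
	uint32_t int_msk;
	const char *msg;
	enum reset_level reset_level;
};

/* Walk the table, report every error bit that is set, and return the
 * most severe reset level requested by a matching entry (NONE_RESET
 * if nothing matched or no match needs a reset) -- the same shape as
 * the reworked hclge_log_error().
 */
static enum reset_level log_error(const struct hw_error *err, uint32_t sts)
{
	enum reset_level level = NONE_RESET;

	for (; err->msg; err++) {
		if (!(err->int_msk & sts))
			continue;
		printf("%s found [error status=0x%x]\n", err->msg,
		       (unsigned)sts);
		if (err->reset_level > level)
			level = err->reset_level;
	}
	return level;
}

int main(void)
{
	static const struct hw_error tbl[] = {
		{ 1u << 0, "cfg_ecc_1bit_err", NONE_RESET },
		{ 1u << 1, "cfg_ecc_mbit_err", GLOBAL_RESET },
		{ 0 }	/* sentinel */
	};

	/* both bits set: logs both entries, returns GLOBAL_RESET (2) */
	printf("reset level = %d\n", log_error(tbl, 0x3));
	return 0;
}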
/* hclge_cmd_query_error: read the error information
@@ -454,6 +692,16 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
return ret;
}
+static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
{
struct device *dev = &hdev->pdev->dev;
@@ -673,6 +921,21 @@ static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
return ret;
}
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
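A hypothetical call site (not part of this patch) showing how the new helper would typically be used when switching hardware error reporting on or off:

	/* hypothetical call site, not part of this patch */
	ret = hclge_config_mac_tnl_int(hdev, en);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"fail(%d) to configure mac tnl interrupt\n", ret);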
+
static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
bool en)
{
@@ -826,6 +1089,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
int num)
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ enum hnae3_reset_type reset_level;
struct device *dev = &hdev->pdev->dev;
__le32 *desc_data;
u32 status;
@@ -845,78 +1109,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
/* log HNS common errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
- hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
- &hclge_imp_tcm_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[1]);
if (status) {
- hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
- &hclge_cmdq_nic_mem_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
dev_warn(dev, "imp_rd_data_poison_err found\n");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
}
status = le32_to_cpu(desc[0].data[3]);
if (status) {
- hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
- &hclge_tqp_int_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[4]);
if (status) {
- hclge_log_error(dev, "MSIX_ECC_INT_STS",
- &hclge_msix_sram_ecc_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log SSU(Storage Switch Unit) errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*(desc_data + 2));
if (status) {
- hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
- &hclge_ssu_mem_ecc_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
if (status) {
dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
if (status) {
- hclge_log_error(dev, "SSU_COMMON_ERR_INT",
- &hclge_ssu_com_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log IGU(Ingress Unit) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
- if (status)
- hclge_log_error(dev, "IGU_INT_STS",
- &hclge_igu_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* log PPP(Programmable Packet Process) errors */
desc_data = (__le32 *)&desc[4];
status = le32_to_cpu(*(desc_data + 1));
- if (status)
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
- &hclge_ppp_mpf_abnormal_int_st1[0], status);
+ if (status) {
+ reset_level =
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
- if (status)
- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
- &hclge_ppp_mpf_abnormal_int_st3[0], status);
+ if (status) {
+ reset_level =
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[5];
@@ -924,55 +1204,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
if (status) {
dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
"rpu_rx_pkt_ecc_mbit_err");
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
}
status = le32_to_cpu(*(desc_data + 2));
if (status) {
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level =
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
if (status) {
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
- &hclge_ppu_mpf_abnormal_int_st3[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level =
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log TM(Traffic Manager) errors */
desc_data = (__le32 *)&desc[6];
status = le32_to_cpu(*desc_data);
if (status) {
- hclge_log_error(dev, "TM_SCH_RINT",
- &hclge_tm_sch_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log QCN(Quantized Congestion Control) errors */
desc_data = (__le32 *)&desc[7];
status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
if (status) {
- hclge_log_error(dev, "QCN_FIFO_RINT",
- &hclge_qcn_fifo_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
if (status) {
- hclge_log_error(dev, "QCN_ECC_RINT",
- &hclge_qcn_ecc_rint[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log NCSI errors */
desc_data = (__le32 *)&desc[9];
status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
if (status) {
- hclge_log_error(dev, "NCSI_ECC_INT_RPT",
- &hclge_ncsi_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+ reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* clear all main PF RAS errors */
@@ -1000,6 +1285,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
{
struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
struct device *dev = &hdev->pdev->dev;
+ enum hnae3_reset_type reset_level;
__le32 *desc_data;
u32 status;
int ret;
@@ -1018,38 +1304,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
/* log SSU(Storage Switch Unit) errors */
status = le32_to_cpu(desc[0].data[0]);
if (status) {
- hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_err_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[1]);
if (status) {
- hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
- &hclge_ssu_fifo_overflow_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
status = le32_to_cpu(desc[0].data[2]);
if (status) {
- hclge_log_error(dev, "SSU_ETS_TCG_INT",
- &hclge_ssu_ets_tcg_int[0], status);
- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
}
/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
- if (status)
- hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
- &hclge_igu_egu_tnl_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* log PPU(RCB) errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
- if (status)
- hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
- &hclge_ppu_pf_abnormal_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0],
+ status);
+ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
/* clear all PF RAS errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1341,16 +1636,15 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
+ enum hnae3_reset_type reset_level;
struct hclge_desc desc_bd;
struct hclge_desc *desc;
__le32 *desc_data;
- int ret = 0;
u32 status;
-
- /* set default handling */
- set_bit(HNAE3_FUNC_RESET, reset_requests);
+ int ret;
/* query the number of bds for the MSIx int status */
hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
@@ -1359,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "fail(%d) to query msix int status bd num\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
return ret;
}
@@ -1381,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1390,9 +1680,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
desc_data = (__le32 *)&desc[1];
status = le32_to_cpu(*desc_data);
if (status) {
- hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
- &hclge_mac_afifo_tnl_int[0], status);
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
}
/* log PPU(RCB) MPF errors */
@@ -1400,9 +1691,11 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
status = le32_to_cpu(*(desc_data + 2)) &
HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
if (status) {
- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
- &hclge_ppu_mpf_abnormal_int_st2[0], status);
- set_bit(HNAE3_CORE_RESET, reset_requests);
+ reset_level =
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0],
+ status);
+ set_bit(reset_level, reset_requests);
}
/* clear all main PF MSIx errors */
@@ -1413,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
@@ -1428,32 +1719,37 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "query all pf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
goto msi_error;
}
/* log SSU PF errors */
status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
if (status) {
- hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
- &hclge_ssu_port_based_pf_int[0], status);
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
}
/* read and log PPP PF errors */
desc_data = (__le32 *)&desc[2];
status = le32_to_cpu(*desc_data);
- if (status)
- hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
- &hclge_ppp_pf_abnormal_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
+ }
/* log PPU(RCB) PF errors */
desc_data = (__le32 *)&desc[3];
status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
- if (status)
- hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
- &hclge_ppu_pf_abnormal_int[0], status);
+ if (status) {
+ reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0],
+ status);
+ set_bit(reset_level, reset_requests);
+ }
/* clear all PF MSIx errors */
hclge_cmd_reuse_desc(&desc[0], false);
@@ -1463,8 +1759,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
if (ret) {
dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
ret);
- /* reset everything for now */
- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+ /* query and clear mac tnl interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ if (ret) {
+ dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
+ goto msi_error;
+ }
+
+ status = le32_to_cpu(desc->data[0]);
+ if (status) {
+ /* When a mac tnl interrupt occurs, record the current time and
+ * the register status in a fifo, then clear the status, so that
+ * the recorded events can be queried via debugfs if the link
+ * status changes unexpectedly at some later time.
+ */
+ mac_tnl_stats.time = local_clock();
+ mac_tnl_stats.status = status;
+ kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
+ ret = hclge_clear_mac_tnl_int(hdev);
+ if (ret)
+ dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+ set_bit(HNAE3_NONE_RESET, reset_requests);
}
msi_error:
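The comment in the hunk above describes recording MAC TNL events for later inspection via debugfs. A minimal sketch of such a consumer, assuming only the fields and kfifo shown in this hunk (the function name is hypothetical):

/* hypothetical consumer, not part of this patch: dump and drain the
 * recorded MAC TNL events, e.g. from a debugfs read handler.
 */
static void hclge_dump_mac_tnl_log(struct hclge_dev *hdev)
{
	struct hclge_mac_tnl_stats stats;
	struct device *dev = &hdev->pdev->dev;

	while (kfifo_get(&hdev->mac_tnl_log, &stats))
		dev_info(dev, "mac tnl: time %llu, status 0x%x\n",
			 (unsigned long long)stats.time, stats.status);
}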
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index fc068280d391..9645590c9294 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -47,6 +47,9 @@
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
+#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
@@ -112,8 +115,10 @@ struct hclge_hw_blk {
struct hclge_hw_error {
u32 int_msk;
const char *msg;
+ enum hnae3_reset_type reset_level;
};
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index deda606c51e7..d3b1f8cb1155 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -31,6 +32,7 @@
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
u16 *allocated_size, bool is_alloc);
@@ -697,6 +699,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
p = hclge_tqps_get_stats(handle, p);
}
+static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
+ u64 *rx_cnt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+ *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+}
+
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
@@ -833,33 +845,189 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
return 0;
}
-static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
- unsigned long *supported = hdev->hw.mac.supported;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 speed_ability = hdev->hw.mac.speed_ability;
+ u32 speed_bit = 0;
- if (speed_ability & HCLGE_SUPPORT_1G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- supported);
+ switch (speed) {
+ case HCLGE_MAC_SPEED_10M:
+ speed_bit = HCLGE_SUPPORT_10M_BIT;
+ break;
+ case HCLGE_MAC_SPEED_100M:
+ speed_bit = HCLGE_SUPPORT_100M_BIT;
+ break;
+ case HCLGE_MAC_SPEED_1G:
+ speed_bit = HCLGE_SUPPORT_1G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_bit = HCLGE_SUPPORT_10G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_bit = HCLGE_SUPPORT_25G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_bit = HCLGE_SUPPORT_40G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_bit = HCLGE_SUPPORT_50G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_bit = HCLGE_SUPPORT_100G_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (speed_bit & speed_ability)
+ return 0;
+
+ return -EINVAL;
+}
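A hypothetical caller (not part of this patch) could use the new check to validate a user-requested speed before programming it:

	/* hypothetical call site, not part of this patch */
	if (hclge_check_port_speed(handle, req_speed)) {
		dev_err(&hdev->pdev->dev,
			"requested speed %u is not supported\n", req_speed);
		return -EINVAL;
	}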
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
+{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- supported);
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ mac->supported);
+}
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ mac->supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- supported);
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ mac->supported);
+}
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ mac->supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ mac->supported);
+}
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ mac->supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ mac->supported);
+}
- linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+static void hclge_convert_setting_fec(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+
+ switch (mac->speed) {
+ case HCLGE_MAC_SPEED_10G:
+ case HCLGE_MAC_SPEED_40G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ mac->fec_ability =
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ case HCLGE_MAC_SPEED_50G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ mac->fec_ability =
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO);
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ break;
+ default:
+ mac->fec_ability = 0;
+ break;
+ }
+}
+
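For reference, the speed-to-FEC mapping introduced in hclge_convert_setting_fec() above can be exercised in isolation. The sketch below is not part of the patch; it uses made-up FEC_* flag values in place of the HNAE3_FEC_* bits and plain speed numbers in place of the HCLGE_MAC_SPEED_* constants:

#include <stdio.h>

/* Hypothetical stand-ins for the HNAE3_FEC_* ability bits */
enum { FEC_AUTO = 1 << 0, FEC_BASER = 1 << 1, FEC_RS = 1 << 2 };

static unsigned int fec_ability_for_speed(int speed_mbps)
{
	switch (speed_mbps) {
	case 10000:
	case 40000:
		return FEC_BASER | FEC_AUTO;          /* BASE-R FEC only */
	case 25000:
	case 50000:
		return FEC_BASER | FEC_RS | FEC_AUTO; /* both BASE-R and RS */
	case 100000:
		return FEC_RS | FEC_AUTO;             /* RS FEC only */
	default:
		return 0;                             /* FEC not applicable */
	}
}

int main(void)
{
	int speeds[] = { 1000, 10000, 25000, 40000, 50000, 100000 };
	unsigned int i;

	for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
		printf("%d Mb/s -> fec_ability 0x%x\n", speeds[i],
		       fec_ability_for_speed(speeds[i]));
	return 0;
}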
+static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
+ u8 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ mac->supported);
+
+ hclge_convert_setting_sr(mac, speed_ability);
+ hclge_convert_setting_lr(mac, speed_ability);
+ hclge_convert_setting_cr(mac, speed_ability);
+ if (hdev->pdev->revision >= 0x21)
+ hclge_convert_setting_fec(mac);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
+ u8 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ hclge_convert_setting_kr(mac, speed_ability);
+ if (hdev->pdev->revision >= 0x21)
+ hclge_convert_setting_fec(mac);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
@@ -900,8 +1068,9 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_fiber_link_mode(hdev, speed_ability);
else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
hclge_parse_copper_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
+ hclge_parse_backplane_link_mode(hdev, speed_ability);
}
-
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
struct hclge_cfg_param_cmd *req;
@@ -1015,6 +1184,23 @@ static int hclge_get_cap(struct hclge_dev *hdev)
return ret;
}
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC 64
+#define HCLGE_MIN_RX_DESC 64
+
+ if (!is_kdump_kernel())
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "Running kdump kernel. Using minimal resources\n");
+
+ /* minimal number of queue pairs equals the number of vports */

+ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
@@ -1074,6 +1260,8 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+ hclge_init_kdump_kernel_config(hdev);
+
return ret;
}
@@ -1337,6 +1525,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->back = hdev;
vport->vport_id = i;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->rxvlan_cfg.rx_vlan_offload_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
@@ -1399,7 +1589,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
return ret;
}
-static int hclge_get_tc_num(struct hclge_dev *hdev)
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
int i, cnt = 0;
@@ -1409,17 +1599,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev)
return cnt;
}
-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
-{
- int i, cnt = 0;
-
- for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
- if (hdev->hw_tc_map & BIT(i) &&
- hdev->tm_info.hw_pfc_map & BIT(i))
- cnt++;
- return cnt;
-}
-
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
@@ -1483,14 +1662,12 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc,
u32 rx_all)
{
- u32 shared_buf_min, shared_buf_tc, shared_std;
- int tc_num, pfc_enable_num;
+ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
+ u32 tc_num = hclge_get_tc_num(hdev);
u32 shared_buf, aligned_mps;
u32 rx_priv;
int i;
- tc_num = hclge_get_tc_num(hdev);
- pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
@@ -1499,9 +1676,7 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
- shared_buf_tc = pfc_enable_num * aligned_mps +
- (tc_num - pfc_enable_num) * aligned_mps / 2 +
- aligned_mps;
+ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
HCLGE_BUF_SIZE_UNIT);
@@ -1518,19 +1693,26 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
- buf_alloc->s_buf.self.low =
- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+ buf_alloc->s_buf.self.low = aligned_mps;
+ }
+
+ if (hnae3_dev_dcb_supported(hdev)) {
+ if (tc_num)
+ hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
+ else
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
+ hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
+ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
+ lo_thrd = hi_thrd - aligned_mps / 2;
+ } else {
+ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
+ lo_thrd = aligned_mps;
}
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- if ((hdev->hw_tc_map & BIT(i)) &&
- (hdev->tm_info.hw_pfc_map & BIT(i))) {
- buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
- buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
- } else {
- buf_alloc->s_buf.tc_thrd[i].low = 0;
- buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
- }
+ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
+ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
}
return true;
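The DCB branch above now derives one high/low threshold pair shared by all TCs. A standalone sketch of that arithmetic, not part of the patch and using illustrative buffer sizes rather than real hardware values (BUF_SIZE_UNIT stands in for HCLGE_BUF_SIZE_UNIT):

#include <stdio.h>

#define BUF_SIZE_UNIT 256u /* illustrative stand-in for HCLGE_BUF_SIZE_UNIT */

static unsigned int rounddown_to(unsigned int x, unsigned int unit)
{
	return x - (x % unit);
}

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int shared_buf = 24576;  /* example shared buffer size */
	unsigned int dv_buf_size = 8192;  /* example dedicated/dv buffer size */
	unsigned int aligned_mps = 1536;  /* MPS rounded up to BUF_SIZE_UNIT */
	unsigned int tc_num = 4;
	unsigned int hi_thrd, lo_thrd;

	hi_thrd = (shared_buf - dv_buf_size) / tc_num; /* split what is left of the shared buffer */
	hi_thrd = max_u32(hi_thrd, 2 * aligned_mps);   /* never below two MPS-sized frames */
	hi_thrd = rounddown_to(hi_thrd, BUF_SIZE_UNIT);
	lo_thrd = hi_thrd - aligned_mps / 2;

	printf("hi_thrd=%u lo_thrd=%u\n", hi_thrd, lo_thrd);
	return 0;
}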
@@ -2095,6 +2277,16 @@ static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ if (!hdev->hw.mac.support_autoneg) {
+ if (enable) {
+ dev_err(&hdev->pdev->dev,
+ "autoneg is not supported by current port\n");
+ return -EOPNOTSUPP;
+ } else {
+ return 0;
+ }
+ }
+
return hclge_set_autoneg_en(hdev, enable);
}
@@ -2110,6 +2302,78 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
return hdev->hw.mac.autoneg;
}
+static int hclge_restart_autoneg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
+{
+ struct hclge_config_fec_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
+
+ req = (struct hclge_config_fec_cmd *)desc.data;
+ if (fec_mode & BIT(HNAE3_FEC_AUTO))
+ hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
+ if (fec_mode & BIT(HNAE3_FEC_RS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_BASER))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ if (fec_mode && !(mac->fec_ability & fec_mode)) {
+ dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_set_fec_hw(hdev, fec_mode);
+ if (ret)
+ return ret;
+
+ mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
+ return 0;
+}
+
+static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (fec_ability)
+ *fec_ability = mac->fec_ability;
+ if (fec_mode)
+ *fec_mode = mac->fec_mode;
+}
+
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -2127,6 +2391,15 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+ ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Fec mode init fail, ret = %d\n", ret);
+ return ret;
+ }
+ }
+
ret = hclge_set_mac_mtu(hdev, hdev->mps);
if (ret) {
dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
@@ -2143,7 +2416,8 @@ static int hclge_mac_init(struct hclge_dev *hdev)
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
- if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
schedule_work(&hdev->mbx_service_task);
}
@@ -2222,6 +2496,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
rhandle = &hdev->vport[i].roce;
if (rclient && rclient->ops->link_status_change)
rclient->ops->link_status_change(rhandle,
@@ -2231,14 +2506,35 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
}
}
+static void hclge_update_port_capability(struct hclge_mac *mac)
+{
+ /* firmware can not identify backplane type, the media type
+ * read from the configuration can help to deal with it
+ */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
+ mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ mac->module_type = HNAE3_MODULE_TYPE_KR;
+ else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
+ mac->module_type = HNAE3_MODULE_TYPE_TP;
+
+ if (mac->support_autoneg) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
+ linkmode_copy(mac->advertising, mac->supported);
+ } else {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ mac->supported);
+ linkmode_zero(mac->advertising);
+ }
+}
+
static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
{
- struct hclge_sfp_speed_cmd *resp = NULL;
+ struct hclge_sfp_info_cmd *resp = NULL;
struct hclge_desc desc;
int ret;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true);
- resp = (struct hclge_sfp_speed_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret == -EOPNOTSUPP) {
dev_warn(&hdev->pdev->dev,
@@ -2249,28 +2545,67 @@ static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
return ret;
}
- *speed = resp->sfp_speed;
+ *speed = le32_to_cpu(resp->speed);
return 0;
}
-static int hclge_update_speed_duplex(struct hclge_dev *hdev)
+static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
{
- struct hclge_mac mac = hdev->hw.mac;
- int speed;
+ struct hclge_sfp_info_cmd *resp;
+ struct hclge_desc desc;
int ret;
- /* get the speed from SFP cmd when phy
- * doesn't exit.
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
+
+ resp->query_type = QUERY_ACTIVE_SPEED;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP does not support get SFP info %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
+ return ret;
+ }
+
+ mac->speed = le32_to_cpu(resp->speed);
+ /* if resp->speed_ability is 0, it means the firmware is an old
+ * version, so do not update these params
*/
- if (mac.phydev)
+ if (resp->speed_ability) {
+ mac->module_type = le32_to_cpu(resp->module_type);
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
+ mac->autoneg = resp->autoneg;
+ mac->support_autoneg = resp->autoneg_ability;
+ } else {
+ mac->speed_type = QUERY_SFP_SPEED;
+ }
+
+ return 0;
+}
+
+static int hclge_update_port_info(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int ret;
+
+ /* get the port info from SFP cmd if not copper port */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
return 0;
- /* if IMP does not support get SFP/qSFP speed, return directly */
+ /* if IMP does not support getting SFP/qSFP info, return directly */
if (!hdev->support_sfp_query)
return 0;
- ret = hclge_get_sfp_speed(hdev, &speed);
+ if (hdev->pdev->revision >= 0x21)
+ ret = hclge_get_sfp_info(hdev, mac);
+ else
+ ret = hclge_get_sfp_speed(hdev, &speed);
+
if (ret == -EOPNOTSUPP) {
hdev->support_sfp_query = false;
return ret;
@@ -2278,19 +2613,20 @@ static int hclge_update_speed_duplex(struct hclge_dev *hdev)
return ret;
}
- if (speed == HCLGE_MAC_SPEED_UNKNOWN)
- return 0; /* do nothing if no SFP */
-
- /* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
-}
-
-static int hclge_update_speed_duplex_h(struct hnae3_handle *handle)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
+ if (hdev->pdev->revision >= 0x21) {
+ if (mac->speed_type == QUERY_ACTIVE_SPEED) {
+ hclge_update_port_capability(mac);
+ return 0;
+ }
+ return hclge_cfg_mac_speed_dup(hdev, mac->speed,
+ HCLGE_MAC_FULL);
+ } else {
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
- return hclge_update_speed_duplex(hdev);
+ /* must config full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ }
}
static int hclge_get_status(struct hnae3_handle *handle)
@@ -2344,6 +2680,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ hdev->rst_stats.imp_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
@@ -2352,6 +2689,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ hdev->rst_stats.global_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
@@ -2360,12 +2698,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+ hdev->rst_stats.core_rst_cnt++;
return HCLGE_VECTOR0_EVENT_RST;
}
/* check for vector0 msix event source */
- if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
+ msix_src_reg);
return HCLGE_VECTOR0_EVENT_ERR;
+ }
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
@@ -2374,6 +2716,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_MBX;
}
+ /* print other vector0 event source */
+ dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
+ cmdq_src_reg, msix_src_reg);
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -2657,7 +3002,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
return ret;
}
- if (!reset)
+ if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
continue;
/* Inform VF to process the reset.
@@ -2694,9 +3039,18 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
static void hclge_do_reset(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
struct pci_dev *pdev = hdev->pdev;
u32 val;
+ if (hclge_get_hw_reset_stat(handle)) {
+ dev_info(&pdev->dev, "Hardware reset not finish\n");
+ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+ return;
+ }
+
switch (hdev->reset_type) {
case HNAE3_GLOBAL_RESET:
val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
@@ -2775,6 +3129,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
clear_bit(HNAE3_FLR_RESET, addr);
}
+ if (hdev->reset_type != HNAE3_NONE_RESET &&
+ rst_level < hdev->reset_type)
+ return HNAE3_NONE_RESET;
+
return rst_level;
}
@@ -2844,6 +3202,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
* after hclge_cmd_init is called.
*/
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.pf_rst_cnt++;
break;
case HNAE3_FLR_RESET:
/* There is no mechanism for PF to know if VF has stopped IO
@@ -2852,6 +3211,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
msleep(100);
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ hdev->rst_stats.flr_rst_cnt++;
break;
case HNAE3_IMP_RESET:
reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
@@ -2932,7 +3292,7 @@ static void hclge_reset(struct hclge_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
- hdev->reset_count++;
+ hdev->rst_stats.reset_cnt++;
/* perform reset of the stack & ae device for a client */
ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
@@ -2958,6 +3318,8 @@ static void hclge_reset(struct hclge_dev *hdev)
goto err_reset;
}
+ hdev->rst_stats.hw_reset_done_cnt++;
+
ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
if (ret)
goto err_reset;
@@ -3001,7 +3363,9 @@ static void hclge_reset(struct hclge_dev *hdev)
hdev->last_reset_time = jiffies;
hdev->reset_fail_cnt = 0;
+ hdev->rst_stats.reset_done_cnt++;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ del_timer(&hdev->reset_timer);
return;
@@ -3154,7 +3518,7 @@ static void hclge_service_task(struct work_struct *work)
hdev->hw_stats.stats_timer = 0;
}
- hclge_update_speed_duplex(hdev);
+ hclge_update_port_info(hdev);
hclge_update_link_status(hdev);
hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
@@ -5194,7 +5558,7 @@ static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hdev->reset_count;
+ return hdev->rst_stats.hw_reset_done_cnt;
}
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
@@ -5282,8 +5646,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
#define HCLGE_SERDES_RETRY_MS 10
#define HCLGE_SERDES_RETRY_NUM 100
-#define HCLGE_MAC_LINK_STATUS_MS 20
-#define HCLGE_MAC_LINK_STATUS_NUM 10
+#define HCLGE_MAC_LINK_STATUS_MS 10
+#define HCLGE_MAC_LINK_STATUS_NUM 100
#define HCLGE_MAC_LINK_STATUS_DOWN 0
#define HCLGE_MAC_LINK_STATUS_UP 1
@@ -5942,8 +6306,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
/* check if we just hit the duplicate */
- if (!ret)
- ret = -EINVAL;
+ if (!ret) {
+ dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ vport->vport_id, addr);
+ return 0;
+ }
dev_err(&hdev->pdev->dev,
"PF failed to add unicast entry(%pM) in the MAC table\n",
@@ -6293,7 +6660,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return -EINVAL;
}
- if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+ if ((!is_first || is_kdump_kernel()) &&
+ hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
dev_warn(&hdev->pdev->dev,
"remove old uc mac address fail.\n");
@@ -6543,30 +6911,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
return ret;
}
-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
- u16 vlan_id, bool is_kill)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
-
- return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
- 0, is_kill);
-}
-
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
- u16 vlan, u8 qos, __be16 proto)
-{
- struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
-
- if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
- return -EINVAL;
- if (proto != htons(ETH_P_8021Q))
- return -EPROTONOSUPPORT;
-
- return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
-}
-
static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
{
struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
@@ -6640,6 +6984,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
return status;
}
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ u16 vlan_tag)
+{
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->txvlan_cfg.accept_tag1 = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ } else {
+ vport->txvlan_cfg.accept_tag1 = false;
+ vport->txvlan_cfg.insert_tag1_en = true;
+ vport->txvlan_cfg.default_tag1 = vlan_tag;
+ }
+
+ vport->txvlan_cfg.accept_untag1 = true;
+
+ /* accept_tag2 and accept_untag2 are not supported on
+ * pdev revision(0x20); newer revisions support them, but
+ * these two fields can not be configured by the user.
+ */
+ vport->txvlan_cfg.accept_tag2 = true;
+ vport->txvlan_cfg.accept_untag2 = true;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ }
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
{
struct hclge_rx_vlan_type_cfg_cmd *rx_req;
@@ -6730,34 +7120,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- vport = &hdev->vport[i];
- vport->txvlan_cfg.accept_tag1 = true;
- vport->txvlan_cfg.accept_untag1 = true;
-
- /* accept_tag2 and accept_untag2 are not supported on
- * pdev revision(0x20), new revision support them. The
- * value of this two fields will not return error when driver
- * send command to fireware in revision(0x20).
- * This two fields can not configured by user.
- */
- vport->txvlan_cfg.accept_tag2 = true;
- vport->txvlan_cfg.accept_untag2 = true;
+ u16 vlan_tag;
- vport->txvlan_cfg.insert_tag1_en = false;
- vport->txvlan_cfg.insert_tag2_en = false;
- vport->txvlan_cfg.default_tag1 = 0;
- vport->txvlan_cfg.default_tag2 = 0;
-
- ret = hclge_set_vlan_tx_offload_cfg(vport);
- if (ret)
- return ret;
-
- vport->rxvlan_cfg.strip_tag1_en = false;
- vport->rxvlan_cfg.strip_tag2_en = true;
- vport->rxvlan_cfg.vlan1_vlan_prionly = false;
- vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+ vport = &hdev->vport[i];
+ vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
- ret = hclge_set_vlan_rx_offload_cfg(vport);
+ ret = hclge_vlan_offload_cfg(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan_tag);
if (ret)
return ret;
}
@@ -6765,7 +7135,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
}
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool writen_to_tbl)
{
struct hclge_vport_vlan_cfg *vlan;
@@ -6777,14 +7148,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
if (!vlan)
return;
- vlan->hd_tbl_status = true;
+ vlan->hd_tbl_status = writen_to_tbl;
vlan->vlan_id = vlan_id;
list_add_tail(&vlan->node, &vport->vlan_list);
}
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
- bool is_write_tbl)
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (!vlan->hd_tbl_status) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, 0, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore vport vlan list failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ }
+ vlan->hd_tbl_status = true;
+ }
+
+ return 0;
+}
+
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool is_write_tbl)
{
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
@@ -6847,14 +7242,203 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- vport->rxvlan_cfg.strip_tag1_en = false;
- vport->rxvlan_cfg.strip_tag2_en = enable;
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en = enable;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ }
vport->rxvlan_cfg.vlan1_vlan_prionly = false;
vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
return hclge_set_vlan_rx_offload_cfg(vport);
}
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+ hclge_rm_vport_all_vlan_table(vport, false);
+ return hclge_set_vlan_filter_hw(hdev,
+ htons(new_info->vlan_proto),
+ vport->vport_id,
+ new_info->vlan_tag,
+ new_info->qos, false);
+ }
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
+ vport->vport_id, old_info->vlan_tag,
+ old_info->qos, true);
+ if (ret)
+ return ret;
+
+ return hclge_add_vport_all_vlan_table(vport);
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_vlan_info *old_vlan_info;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+ if (ret)
+ return ret;
+
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(vlan_info->vlan_proto),
+ vport->vport_id,
+ vlan_info->vlan_tag,
+ vlan_info->qos, false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(old_vlan_info->vlan_proto),
+ vport->vport_id,
+ old_vlan_info->vlan_tag,
+ old_vlan_info->qos, true);
+ if (ret)
+ return ret;
+
+ goto update;
+ }
+
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
+ if (ret)
+ return ret;
+
+ /* update state only when disabling/enabling port based VLAN */
+ vport->port_base_vlan_cfg.state = state;
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+update:
+ vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
+ vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
+ vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+
+ return 0;
+}
+
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+ enum hnae3_port_base_vlan_state state,
+ u16 vlan)
+{
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ if (!vlan)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+ else
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
+ } else {
+ if (!vlan)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+ else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+ else
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
+ }
+}
+
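hclge_get_port_base_vlan_state() above collapses the (current state, requested VLAN) pair into one of four actions. The following standalone sketch, not part of the patch, mirrors that decision with simplified PBV_* names standing in for the HNAE3_PORT_BASE_VLAN_* constants:

#include <stdio.h>

enum pbv_state { PBV_DISABLE, PBV_ENABLE, PBV_MODIFY, PBV_NOCHANGE };

/* Mirror of the decision above; old_vlan is the currently configured
 * port-base VLAN tag, only meaningful when the current state is PBV_ENABLE.
 */
static enum pbv_state next_state(enum pbv_state cur, unsigned short old_vlan,
				 unsigned short new_vlan)
{
	if (cur == PBV_DISABLE)
		return new_vlan ? PBV_ENABLE : PBV_NOCHANGE;
	if (!new_vlan)
		return PBV_DISABLE;
	return new_vlan == old_vlan ? PBV_NOCHANGE : PBV_MODIFY;
}

int main(void)
{
	static const char * const name[] = {
		"DISABLE", "ENABLE", "MODIFY", "NOCHANGE"
	};

	printf("disabled,     vlan 0  -> %s\n", name[next_state(PBV_DISABLE, 0, 0)]);
	printf("disabled,     vlan 10 -> %s\n", name[next_state(PBV_DISABLE, 0, 10)]);
	printf("enabled(10),  vlan 0  -> %s\n", name[next_state(PBV_ENABLE, 10, 0)]);
	printf("enabled(10),  vlan 10 -> %s\n", name[next_state(PBV_ENABLE, 10, 10)]);
	printf("enabled(10),  vlan 20 -> %s\n", name[next_state(PBV_ENABLE, 10, 20)]);
	return 0;
}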
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+ u16 vlan, u8 qos, __be16 proto)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ u16 state;
+ int ret;
+
+ if (hdev->pdev->revision == 0x20)
+ return -EOPNOTSUPP;
+
+ /* qos is a 3-bit value, so it can not be bigger than 7 */
+ if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+ return -EINVAL;
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ vport = &hdev->vport[vfid];
+ state = hclge_get_port_base_vlan_state(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan);
+ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+ return 0;
+
+ vlan_info.vlan_tag = vlan;
+ vlan_info.qos = qos;
+ vlan_info.vlan_proto = ntohs(proto);
+
+ /* update port based VLAN for PF */
+ if (!vfid) {
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+ return ret;
+ }
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ return hclge_update_port_base_vlan_cfg(vport, state,
+ &vlan_info);
+ } else {
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ (u8)vfid, state,
+ vlan, qos,
+ ntohs(proto));
+ return ret;
+ }
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool writen_to_tbl = false;
+ int ret = 0;
+
+ /* when port based VLAN is enabled, we use the port based VLAN as the
+ * VLAN filter entry. In this case, we don't update the VLAN filter
+ * table when the user adds a new VLAN or removes an existing VLAN;
+ * we just update the vport VLAN list. The VLAN ids in the VLAN list
+ * won't be written to the VLAN filter table until port based VLAN
+ * is disabled.
+ */
+ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+ vlan_id, 0, is_kill);
+ writen_to_tbl = true;
+ }
+
+ if (ret)
+ return ret;
+
+ if (is_kill)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ else
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+
+ return 0;
+}
+
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
@@ -7199,13 +7783,13 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
if (!fc_autoneg)
return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
- /* Only support flow control negotiation for netdev with
- * phy attached for now.
- */
- if (!phydev)
+ if (phydev)
+ return phy_start_aneg(phydev);
+
+ if (hdev->pdev->revision == 0x20)
return -EOPNOTSUPP;
- return phy_start_aneg(phydev);
+ return hclge_restart_autoneg(handle);
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
@@ -7222,13 +7806,17 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*auto_neg = hdev->hw.mac.autoneg;
}
-static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type)
+static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (media_type)
*media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
}
static void hclge_get_mdix_mode(struct hnae3_handle *handle,
@@ -7280,6 +7868,32 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
*tp_mdix = ETH_TP_MDI;
}
+static void hclge_info_show(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "PF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+ dev_info(dev, "This is %s PF\n",
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ dev_info(dev, "DCB %s\n",
+ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ dev_info(dev, "MQPRIO %s\n",
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+
+ dev_info(dev, "PF info end.\n");
+}
+
static int hclge_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -7301,6 +7915,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
if (hdev->roce_client &&
hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -7660,6 +8277,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ INIT_KFIFO(hdev->mac_tnl_log);
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -7708,7 +8327,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
int i;
for (i = 0; i < hdev->num_alloc_vport; i++) {
- hclge_vport_start(vport);
+ hclge_vport_stop(vport);
vport++;
}
}
@@ -7813,6 +8432,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, false);
synchronize_irq(hdev->misc_vector.vector_irq);
+ hclge_config_mac_tnl_int(hdev, false);
hclge_hw_error_set_state(hdev, false);
hclge_cmd_uninit(hdev);
hclge_misc_irq_uninit(hdev);
@@ -8234,9 +8854,11 @@ static const struct hnae3_ae_ops hclge_ops = {
.client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
- .update_speed_duplex_h = hclge_update_speed_duplex_h,
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
+ .check_port_speed = hclge_check_port_speed,
+ .get_fec = hclge_get_fec,
+ .set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
.get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
@@ -8253,11 +8875,13 @@ static const struct hnae3_ae_ops hclge_ops = {
.rm_mc_addr = hclge_rm_mc_addr,
.set_autoneg = hclge_set_autoneg,
.get_autoneg = hclge_get_autoneg,
+ .restart_autoneg = hclge_restart_autoneg,
.get_pauseparam = hclge_get_pauseparam,
.set_pauseparam = hclge_set_pauseparam,
.set_mtu = hclge_set_mtu,
.reset_queue = hclge_reset_tqp,
.get_stats = hclge_get_stats,
+ .get_mac_pause_stats = hclge_get_mac_pause_stat,
.update_stats = hclge_update_stats,
.get_strings = hclge_get_strings,
.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b57ac4beb313..dd06b11187b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/phy.h>
#include <linux/if_vlan.h>
+#include <linux/kfifo.h>
#include "hclge_cmd.h"
#include "hnae3.h"
@@ -188,6 +189,8 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_25G_BIT BIT(2)
#define HCLGE_SUPPORT_50G_BIT BIT(3)
#define HCLGE_SUPPORT_100G_BIT BIT(4)
+/* to be compatible with existing boards */
+#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_GE \
@@ -235,15 +238,25 @@ enum HCLGE_MAC_DUPLEX {
HCLGE_MAC_FULL
};
+#define QUERY_SFP_SPEED 0
+#define QUERY_ACTIVE_SPEED 1
+
struct hclge_mac {
u8 phy_addr;
u8 flag;
- u8 media_type;
+ u8 media_type; /* port media type, e.g. fibre/copper/backplane */
u8 mac_addr[ETH_ALEN];
u8 autoneg;
u8 duplex;
+ u8 support_autoneg;
+ u8 speed_type; /* 0: sfp speed, 1: active speed */
u32 speed;
- int link; /* store the link status of mac & phy (if phy exit)*/
+ u32 speed_ability; /* speed ability supported by current media */
+ u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
+ u32 fec_mode; /* active fec mode */
+ u32 user_fec_mode;
+ u32 fec_ability;
+ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev;
struct mii_bus *mdio_bus;
phy_interface_t phy_if;
@@ -649,6 +662,23 @@ struct hclge_vport_vlan_cfg {
u16 vlan_id;
};
+struct hclge_rst_stats {
+ u32 reset_done_cnt; /* the number of resets completed */
+ u32 hw_reset_done_cnt; /* the number of HW resets completed */
+ u32 pf_rst_cnt; /* the number of PF resets */
+ u32 flr_rst_cnt; /* the number of FLR resets */
+ u32 core_rst_cnt; /* the number of CORE resets */
+ u32 global_rst_cnt; /* the number of GLOBAL resets */
+ u32 imp_rst_cnt; /* the number of IMP resets */
+ u32 reset_cnt; /* the number of resets performed */
+};
+
+/* time and register status when a mac tunnel interrupt occurs */
+struct hclge_mac_tnl_stats {
+ u64 time;
+ u32 status;
+};
+
/* For each bit of TCAM entry, it uses a pair of 'x' and
* 'y' to indicate which value to match, like below:
* ----------------------------------
@@ -675,6 +705,7 @@ struct hclge_vport_vlan_cfg {
(y) = (_k_ ^ ~_v_) & (_k_); \
} while (0)
+#define HCLGE_MAC_TNL_LOG_SIZE 8
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
@@ -691,7 +722,7 @@ struct hclge_dev {
unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
- unsigned long reset_count; /* the number of reset has been done */
+ struct hclge_rst_stats rst_stats;
u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
@@ -791,6 +822,9 @@ struct hclge_dev {
struct mutex umv_mutex; /* protect share_umv_size */
struct mutex vport_cfg_mutex; /* Protect stored vf table */
+
+ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
+ HCLGE_MAC_TNL_LOG_SIZE);
};
/* VPort level vlan tag configuration for TX direction */
@@ -807,10 +841,11 @@ struct hclge_tx_vtag_cfg {
/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
- bool strip_tag1_en; /* Whether strip inner vlan tag */
- bool strip_tag2_en; /* Whether strip outer vlan tag */
- bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
- bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
+ u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */
+ u8 strip_tag1_en; /* Whether strip inner vlan tag */
+ u8 strip_tag2_en; /* Whether strip outer vlan tag */
+ u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
+ u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
};
struct hclge_rss_tuple_cfg {
@@ -829,6 +864,17 @@ enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_MAX
};
+struct hclge_vlan_info {
+ u16 vlan_proto; /* so far support 802.1Q only */
+ u16 qos;
+ u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config {
+ u16 state;
+ struct hclge_vlan_info vlan_info;
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -842,9 +888,10 @@ struct hclge_vport {
u16 alloc_rss_size;
u16 qs_offset;
- u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
struct hclge_rx_vtag_cfg rxvlan_cfg;
@@ -924,9 +971,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
- bool is_write_tbl);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info);
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state, u16 vlan_tag, u16 qos,
+ u16 vlan_proto);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 306a23e486de..0e04e63f2a94 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
struct hclge_dev *hdev = vport->back;
@@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
return -EIO;
}
- if (gen_resp)
+ if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
return 0;
@@ -289,9 +288,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
return 0;
}
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state, u16 vlan_tag, u16 qos,
+ u16 vlan_proto)
+{
+#define MSG_DATA_SIZE 8
+
+ u8 msg_data[MSG_DATA_SIZE];
+
+ memcpy(&msg_data[0], &state, sizeof(u16));
+ memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
+ memcpy(&msg_data[4], &qos, sizeof(u16));
+ memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HLCGE_MBX_PUSH_VLAN_INFO, vfid);
+}
+
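The helper above packs four u16 fields into an 8-byte mailbox payload at fixed offsets (state at 0, proto at 2, qos at 4, tag at 6, host byte order). A standalone pack/unpack sketch, not part of the patch and using illustrative struct and function names, shows the layout:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct pbv_msg {
	uint16_t state;
	uint16_t vlan_proto;
	uint16_t qos;
	uint16_t vlan_tag;
};

/* Pack in the same order used above: state @0, proto @2, qos @4, tag @6 */
static void pbv_pack(uint8_t out[8], const struct pbv_msg *m)
{
	memcpy(&out[0], &m->state, sizeof(uint16_t));
	memcpy(&out[2], &m->vlan_proto, sizeof(uint16_t));
	memcpy(&out[4], &m->qos, sizeof(uint16_t));
	memcpy(&out[6], &m->vlan_tag, sizeof(uint16_t));
}

static void pbv_unpack(struct pbv_msg *m, const uint8_t in[8])
{
	memcpy(&m->state, &in[0], sizeof(uint16_t));
	memcpy(&m->vlan_proto, &in[2], sizeof(uint16_t));
	memcpy(&m->qos, &in[4], sizeof(uint16_t));
	memcpy(&m->vlan_tag, &in[6], sizeof(uint16_t));
}

int main(void)
{
	struct pbv_msg tx = { .state = 1, .vlan_proto = 0x8100,
			      .qos = 3, .vlan_tag = 100 };
	struct pbv_msg rx;
	uint8_t buf[8];

	pbv_pack(buf, &tx);
	pbv_unpack(&rx, buf);
	printf("state=%u proto=0x%x qos=%u tag=%u\n",
	       rx.state, rx.vlan_proto, rx.qos, rx.vlan_tag);
	return 0;
}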
static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
- bool gen_resp)
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
int status = 0;
@@ -305,19 +320,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
vlan, is_kill);
- if (!status)
- is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
- : hclge_add_vport_vlan_table(vport, vlan);
} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
struct hnae3_handle *handle = &vport->nic;
bool en = mbx_req->msg[2] ? true : false;
status = hclge_en_hw_strip_rxvtag(handle, en);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+ struct hclge_vlan_info *vlan_info;
+ u16 *state;
+
+ state = (u16 *)&mbx_req->msg[2];
+ vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
+ status = hclge_update_port_base_vlan_cfg(vport, *state,
+ vlan_info);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+ u8 state;
+
+ state = vport->port_base_vlan_cfg.state;
+ status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
+ sizeof(u8));
}
- if (gen_resp)
- status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
return status;
}
@@ -385,24 +408,33 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
HCLGE_TQPS_DEPTH_INFO_LEN);
}
+static int hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u8 resp_data[2];
+
+ resp_data[0] = hdev->hw.mac.media_type;
+ resp_data[1] = hdev->hw.mac.module_type;
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data,
+ sizeof(resp_data));
+}
+
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[10];
- u16 media_type;
+ u8 msg_data[8];
u8 dest_vfid;
u16 duplex;
/* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex;
- media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
- memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */
@@ -565,7 +597,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req, true);
+ ret = hclge_set_vf_uc_mac_addr(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF fail(%d) to set VF UC MAC Addr\n",
@@ -579,7 +611,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
ret);
break;
case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, false);
+ ret = hclge_set_vf_vlan_cfg(vport, req);
if (ret)
dev_err(&hdev->pdev->dev,
"PF failed(%d) to config VF's VLAN\n",
@@ -662,6 +694,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_rm_vport_all_vlan_table(vport, true);
mutex_unlock(&hdev->vport_cfg_mutex);
break;
+ case HCLGE_MBX_GET_MEDIA_TYPE:
+ ret = hclge_get_vf_media_type(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to media type for VF\n",
+ ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 48eda2c6fdae..1e8134892d77 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -3,6 +3,7 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
+#include <linux/marvell_phy.h>
#include "hclge_cmd.h"
#include "hclge_main.h"
@@ -121,12 +122,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
int hclge_mac_mdio_config(struct hclge_dev *hdev)
{
+#define PHY_INEXISTENT 255
+
struct hclge_mac *mac = &hdev->hw.mac;
struct phy_device *phydev;
struct mii_bus *mdio_bus;
int ret;
- if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
+ dev_info(&hdev->pdev->dev,
+ "no phy device is connected to mdio bus\n");
+ return 0;
+ } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
hdev->hw.mac.phy_addr);
return -EINVAL;
@@ -203,6 +210,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle)
linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
+
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index aafc69f4bfdd..a7bbb6d3091a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
ret = hclge_pfc_setup_hw(hdev);
if (init && ret == -EOPNOTSUPP)
dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
- else
+ else if (ret) {
+ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
+ ret);
return ret;
+ }
return hclge_tm_bp_setup(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 9441b453d38d..71f356fc2446 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+ int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
+ struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclgevf_desc *desc;
int clean = 0;
u32 head;
- desc = &csq->desc[ntc];
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
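The rewritten CSQ cleanup above replaces the per-descriptor loop with a wrap-aware validity check on the hardware head pointer plus a modular distance for the number of cleaned descriptors. A standalone sketch of both calculations on a toy ring, not part of the patch and with made-up ring positions:

#include <stdio.h>

/* Head is valid iff it lies in the "in flight" region between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */
static int head_is_valid(int head, int ntc, int ntu)
{
	if (ntu > ntc)
		return head >= ntc && head <= ntu;
	return head >= ntc || head <= ntu;
}

int main(void)
{
	int desc_num = 8, ntc = 6, ntu = 2; /* ring has wrapped */
	int heads[] = { 7, 1, 4 };
	unsigned int i;

	for (i = 0; i < sizeof(heads) / sizeof(heads[0]); i++) {
		int head = heads[i];
		int valid = head_is_valid(head, ntc, ntu);
		int clean = (head - ntc + desc_num) % desc_num;

		printf("head=%d valid=%d cleaned=%d\n", head, valid,
		       valid ? clean : 0);
	}
	return 0;
}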
@@ -321,13 +334,13 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
- spin_lock_bh(&hdev->hw.cmq.crq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
- hdev->arq.count = 0;
+ atomic_set(&hdev->arq.count, 0);
hdev->hw.cmq.csq.next_to_clean = 0;
hdev->hw.cmq.csq.next_to_use = 0;
hdev->hw.cmq.crq.next_to_clean = 0;
@@ -335,7 +348,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
hclgevf_cmd_init_regs(&hdev->hw);
- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
@@ -344,8 +357,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
* reset may happen when lower level reset is being processed.
*/
if (hclgevf_is_reset_pending(hdev)) {
- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
- return -EBUSY;
+ ret = -EBUSY;
+ goto err_cmd_init;
}
/* get firmware version */
@@ -353,13 +366,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- return ret;
+ goto err_cmd_init;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
+
+err_cmd_init:
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
}
static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8bc28e6f465f..5d53467ee2d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ u8 resp_msg;
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
+ NULL, 0, true, &resp_msg, sizeof(u8));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get port based vlan state failed %d",
+ ret);
+ return ret;
+ }
+
+ nic->port_base_vlan_state = resp_msg;
+
+ return 0;
+}
+
static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
{
#define HCLGEVF_TQPS_RSS_INFO_LEN 6
@@ -307,6 +328,26 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
return qid_in_pf;
}
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg[2];
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+ true, resp_msg, sizeof(resp_msg));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get the pf port media type failed %d",
+ ret);
+ return ret;
+ }
+
+ hdev->hw.mac.media_type = resp_msg[0];
+ hdev->hw.mac.module_type = resp_msg[1];
+
+ return 0;
+}
+
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
@@ -404,7 +445,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
}
}
-void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED 1
@@ -1375,9 +1416,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
case HNAE3_VF_FUNC_RESET:
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
0, true, NULL, sizeof(u8));
+ hdev->rst_stats.vf_func_rst_cnt++;
break;
case HNAE3_FLR_RESET:
set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ hdev->rst_stats.flr_rst_cnt++;
break;
default:
break;
@@ -1400,7 +1443,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
- hdev->reset_count++;
+ hdev->rst_stats.rst_cnt++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
@@ -1426,6 +1469,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
goto err_reset;
}
+ hdev->rst_stats.hw_rst_done_cnt++;
+
rtnl_lock();
/* now, re-initialize the nic client and ae device*/
@@ -1444,6 +1489,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
hdev->last_reset_time = jiffies;
ae_dev->reset_type = HNAE3_NONE_RESET;
+ hdev->rst_stats.rst_done_cnt++;
return ret;
err_reset_lock:
@@ -1455,6 +1501,8 @@ err_reset:
*/
hclgevf_cmd_init(hdev);
dev_err(&hdev->pdev->dev, "failed to reset VF\n");
+ if (hclgevf_is_reset_pending(hdev))
+ hclgevf_reset_task_schedule(hdev);
return ret;
}
@@ -1564,8 +1612,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
- if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state) &&
- !test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) {
+ if (!test_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state)) {
set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state);
schedule_work(&hdev->rst_service_task);
}
@@ -1603,6 +1650,7 @@ static void hclgevf_service_timer(struct timer_list *t)
mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+ hdev->stats_timer++;
hclgevf_task_schedule(hdev);
}
@@ -1711,7 +1759,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
return;
ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
@@ -1723,9 +1771,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work)
static void hclgevf_service_task(struct work_struct *work)
{
+ struct hnae3_handle *handle;
struct hclgevf_dev *hdev;
hdev = container_of(work, struct hclgevf_dev, service_task);
+ handle = &hdev->nic;
+
+ if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
+ hclgevf_tqps_update_stats(handle);
+ hdev->stats_timer = 0;
+ }
/* request the link status from the PF. PF would be able to tell VF
* about such updates in future so we might remove this later
@@ -1762,6 +1817,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
*clearval = cmdq_src_reg;
+ hdev->rst_stats.vf_rst_cnt++;
return HCLGEVF_VECTOR0_EVENT_RST;
}
@@ -1814,6 +1870,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
/* get queue configuration from PF */
ret = hclgevf_get_queue_info(hdev);
if (ret)
@@ -1824,6 +1885,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_get_pf_media_type(hdev);
+ if (ret)
+ return ret;
+
/* get tc configuration from PF */
return hclgevf_get_tc_info(hdev);
}
@@ -1986,8 +2051,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < handle->kinfo.num_tqps; i++)
- hclgevf_reset_tqp(handle, i);
+ if (hdev->reset_type != HNAE3_VF_RESET)
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ if (hclgevf_reset_tqp(handle, i))
+ break;
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -2007,9 +2074,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+
+ ret = hclgevf_set_alive(handle, true);
+ if (ret)
+ return ret;
mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
- return hclgevf_set_alive(handle, true);
+
+ return 0;
}
static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2051,6 +2124,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
{
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ if (hdev->keep_alive_timer.function)
+ del_timer_sync(&hdev->keep_alive_timer);
+ if (hdev->keep_alive_task.func)
+ cancel_work_sync(&hdev->keep_alive_task);
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
if (hdev->service_task.func)
@@ -2155,6 +2232,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
hclgevf_free_vector(hdev, 0);
}
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "VF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %d\n",
+ hdev->hw.mac.media_type);
+
+ dev_info(dev, "VF info end.\n");
+}
+
static int hclgevf_init_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -2172,6 +2266,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client,
hnae3_set_client_init_flag(client, ae_dev, 1);
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
struct hnae3_client *rc = hdev->roce_client;
@@ -2651,12 +2748,16 @@ static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
return hclgevf_config_gro(hdev, enable);
}
-static void hclgevf_get_media_type(struct hnae3_handle *handle,
- u8 *media_type)
+static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
if (media_type)
*media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
}
static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
@@ -2677,7 +2778,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- return hdev->reset_count;
+ return hdev->rst_stats.hw_rst_done_cnt;
}
static void hclgevf_get_link_mode(struct hnae3_handle *handle,
@@ -2756,6 +2857,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
}
}
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+
+ rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+
+	/* send msg to PF and wait for the PF to update the port based VLAN info */
+ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG,
+ port_base_vlan_info, data_size,
+ false, NULL, 0);
+
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+ rtnl_lock();
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index c128863ee7d0..cc52f54f8c08 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -116,6 +116,8 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_STATS_TIMER_INTERVAL (36)
+
enum hclgevf_evt_cause {
HCLGEVF_VECTOR0_EVENT_RST,
HCLGEVF_VECTOR0_EVENT_MBX,
@@ -141,6 +143,7 @@ enum hclgevf_states {
struct hclgevf_mac {
u8 media_type;
+ u8 module_type;
u8 mac_addr[ETH_ALEN];
int link;
u8 duplex;
@@ -210,6 +213,15 @@ struct hclgevf_misc_vector {
int vector_irq;
};
+struct hclgevf_rst_stats {
+ u32 rst_cnt; /* the number of reset */
+ u32 vf_func_rst_cnt; /* the number of VF function reset */
+ u32 flr_rst_cnt; /* the number of FLR */
+ u32 vf_rst_cnt; /* the number of VF reset */
+ u32 rst_done_cnt; /* the number of reset completed */
+ u32 hw_rst_done_cnt; /* the number of HW reset completed */
+};
+
struct hclgevf_dev {
struct pci_dev *pdev;
struct hnae3_ae_dev *ae_dev;
@@ -227,7 +239,7 @@ struct hclgevf_dev {
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
- unsigned long reset_count; /* the number of reset has been done */
+ struct hclgevf_rst_stats rst_stats;
u32 reset_attempts;
u32 fw_version;
@@ -272,6 +284,7 @@ struct hclgevf_dev {
struct hnae3_client *nic_client;
struct hnae3_client *roce_client;
u32 flag;
+ u32 stats_timer;
};
static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
@@ -290,4 +303,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
u8 duplex);
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size);
#endif
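
The hclgevf hunks above split the single reset counter into per-type counters and add a stats_timer divider: the service timer bumps hdev->stats_timer on every tick and the service task refreshes the TQP statistics only once the counter reaches HCLGEVF_STATS_TIMER_INTERVAL (36). Below is a minimal userspace sketch of that divider pattern; the names are illustrative stand-ins, not driver code.

/* Tick-divider pattern: do cheap work every tick, expensive work every
 * STATS_TIMER_INTERVAL ticks.  Build with: cc divider.c
 */
#include <stdio.h>

#define STATS_TIMER_INTERVAL 36   /* mirrors HCLGEVF_STATS_TIMER_INTERVAL */

struct fake_dev {
        unsigned int stats_timer;   /* tick counter, like hdev->stats_timer */
        unsigned int stats_updates; /* how many times stats were refreshed */
};

static void service_tick(struct fake_dev *dev)
{
        dev->stats_timer++;
        if (dev->stats_timer >= STATS_TIMER_INTERVAL) {
                dev->stats_updates++;   /* stands in for hclgevf_tqps_update_stats() */
                dev->stats_timer = 0;
        }
}

int main(void)
{
        struct fake_dev dev = { 0 };
        int tick;

        for (tick = 0; tick < 200; tick++)
                service_tick(&dev);

        printf("200 ticks -> %u stats updates\n", dev.stats_updates);
        return 0;
}
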
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 7dc3c9f79169..30f2e9352cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
- "VF could not get mbx resp(=%d) from PF in %d tries\n",
- hdev->mbx_resp.received_resp, i);
+ "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
+ code0, code1, hdev->mbx_resp.received_resp, i);
return -EIO;
}
@@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
dev_err(&hdev->pdev->dev,
- "VF could not match resp code(code0=%d,code1=%d), %d",
+ "VF could not match resp code(code0=%d,code1=%d), %d\n",
code0, code1, mbx_resp->resp_status);
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
+ r_code0, r_code1);
return -EIO;
}
@@ -95,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
}
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
+ ~HCLGE_MBX_NEED_RESP_BIT;
req->msg[0] = code;
req->msg[1] = subcode;
memcpy(&req->msg[2], msg_data, msg_len);
@@ -198,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
case HCLGE_MBX_LINK_STAT_CHANGE:
case HCLGE_MBX_ASSERTING_RESET:
case HCLGE_MBX_LINK_STAT_MODE:
+ case HLCGE_MBX_PUSH_VLAN_INFO:
/* set this mbx event as pending. This is required as we
* might loose interrupt event when mbx task is busy
* handling. This shall be cleared when mbx task just
@@ -208,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
/* we will drop the async msg if we find ARQ as full
* and continue with next message
*/
- if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ if (atomic_read(&hdev->arq.count) >=
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
dev_warn(&hdev->pdev->dev,
"Async Q full, dropping msg(%d)\n",
req->msg[1]);
@@ -220,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
memcpy(&msg_q[0], req->msg,
HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
hclge_mbx_tail_ptr_move_arq(hdev->arq);
- hdev->arq.count++;
+ atomic_inc(&hdev->arq.count);
hclgevf_mbx_task_schedule(hdev);
@@ -243,8 +250,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
enum hnae3_reset_type reset_type;
- u16 link_status;
- u16 *msg_q;
+ u16 link_status, state;
+ u16 *msg_q, *vlan_info;
u8 duplex;
u32 speed;
u32 tail;
@@ -272,7 +279,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]);
- hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link link status */
hclgevf_update_link_status(hdev, link_status);
@@ -300,6 +306,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
hclgevf_reset_task_schedule(hdev);
break;
+ case HLCGE_MBX_PUSH_VLAN_INFO:
+ state = le16_to_cpu(msg_q[1]);
+ vlan_info = &msg_q[1];
+ hclgevf_update_port_base_vlan_info(hdev, state,
+ (u8 *)vlan_info, 8);
+ break;
default:
dev_err(&hdev->pdev->dev,
"fetched unsupported(%d) message from arq\n",
@@ -308,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
}
hclge_mbx_head_ptr_move_arq(hdev->arq);
- hdev->arq.count--;
+ atomic_dec(&hdev->arq.count);
msg_q = hdev->arq.msg_q[hdev->arq.head];
}
}
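
The mailbox changes above convert hdev->arq.count from a plain integer to an atomic_t, because the interrupt-driven mailbox handler increments it while the deferred async task decrements it. A minimal userspace sketch of the same occupancy counter using C11 atomics follows; ARQ_MAX_MSG_NUM stands in for HCLGE_MBX_MAX_ARQ_MSG_NUM and nothing here is driver code.

#include <stdatomic.h>
#include <stdio.h>

#define ARQ_MAX_MSG_NUM 8   /* stand-in for HCLGE_MBX_MAX_ARQ_MSG_NUM */

struct fake_arq {
        atomic_int count;   /* occupancy, like the atomic_t hdev->arq.count */
};

/* producer side: the mailbox/interrupt handler queues one message */
static int arq_produce(struct fake_arq *arq)
{
        if (atomic_load(&arq->count) >= ARQ_MAX_MSG_NUM)
                return -1;               /* queue full, drop the message */
        atomic_fetch_add(&arq->count, 1);
        return 0;
}

/* consumer side: the deferred task drains one message */
static void arq_consume(struct fake_arq *arq)
{
        atomic_fetch_sub(&arq->count, 1);
}

int main(void)
{
        struct fake_arq arq = { .count = 0 };
        int i, dropped = 0;

        for (i = 0; i < 10; i++)
                if (arq_produce(&arq))
                        dropped++;
        while (atomic_load(&arq.count) > 0)
                arq_consume(&arq);

        printf("dropped %d of 10, final count %d\n",
               dropped, atomic_load(&arq.count));
        return 0;
}
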
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index e17bf33eba0c..0fbe8046824b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -518,7 +518,7 @@ process_sq_wqe:
flush_skbs:
netdev_txq = netdev_get_tx_queue(netdev, q_id);
- if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+ if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
return err;
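
This hunk is one of several in the pull that replace the skb->xmit_more test with netdev_xmit_more(), which reads the more-packets hint maintained by the core transmit path instead of a per-skb field. The batching logic itself is unchanged: descriptors are posted for every packet, but the (expensive) doorbell write only happens when no further packets are queued or the queue has been stopped. A hedged userspace sketch of that pattern, with made-up names:

#include <stdbool.h>
#include <stdio.h>

struct fake_txq {
        unsigned int posted;     /* descriptors written since last doorbell */
        unsigned int doorbells;  /* how many doorbell (MMIO) writes happened */
        bool stopped;
};

static void xmit_one(struct fake_txq *txq, bool xmit_more)
{
        txq->posted++;
        if (!xmit_more || txq->stopped) {
                txq->doorbells++;   /* stands in for hinic_sq_write_db() */
                txq->posted = 0;
        }
}

int main(void)
{
        struct fake_txq txq = { 0 };
        int i;

        /* a burst of 16 packets: only the last one flushes the doorbell */
        for (i = 0; i < 16; i++)
                xmit_one(&txq, i != 15);

        printf("16 packets -> %u doorbell writes\n", txq.doorbells);
        return 0;
}
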
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 90b62c1412c8..707c8ba120c2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1463,7 +1463,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
memset(pr, 0, sizeof(struct ehea_port_res));
- pr->tx_bytes = rx_bytes;
+ pr->tx_bytes = tx_bytes;
pr->tx_packets = tx_packets;
pr->rx_bytes = rx_bytes;
pr->rx_packets = rx_packets;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 5e4e37132bf2..77ce17383aba 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -123,8 +123,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
struct ehea_cq *cq;
- struct h_epa epa;
- u64 *cq_handle_ref, hret, rpage;
+ u64 hret, rpage;
u32 counter;
int ret;
void *vpage;
@@ -139,8 +138,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq->adapter = adapter;
- cq_handle_ref = &cq->fw_handle;
-
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
if (hret != H_SUCCESS) {
@@ -188,7 +185,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
}
hw_qeit_reset(&cq->hw_queue);
- epa = cq->epas.kernel;
ehea_reset_cq_ep(cq);
ehea_reset_cq_n1(cq);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index dd71d5db7274..d86b0e5895a6 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -93,7 +93,7 @@ struct ibmveth_stat {
#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
-struct ibmveth_stat ibmveth_stats[] = {
+static struct ibmveth_stat ibmveth_stats[] = {
{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
{ "replenish_add_buff_failure",
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3dfb2d131eb7..b398d6c94dbd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -120,6 +120,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
+static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1968,13 +1969,11 @@ static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
- struct net_device *netdev;
bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
- netdev = adapter->netdev;
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
@@ -2279,23 +2278,20 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
static int ibmvnic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
- supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL;
+ rc = send_query_phys_parms(adapter);
+ if (rc) {
+ adapter->speed = SPEED_UNKNOWN;
+ adapter->duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.speed = adapter->speed;
+ cmd->base.duplex = adapter->duplex;
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
cmd->base.autoneg = AUTONEG_ENABLE;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -2923,8 +2919,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
goto req_tx_irq_failed;
}
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
+ adapter->vdev->unit_address, i);
rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", scrq);
+ 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
@@ -2944,8 +2942,10 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
dev_err(dev, "Error mapping irq\n");
goto req_rx_irq_failed;
}
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
+ adapter->vdev->unit_address, i);
rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", scrq);
+ 0, scrq->name, scrq);
if (rc) {
dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
scrq->irq, rc);
@@ -4297,6 +4297,73 @@ out:
}
}
+static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+ int rc;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
+ crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
+ init_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return rc;
+ wait_for_completion(&adapter->fw_done);
+ return adapter->fw_done_rc ? -EIO : 0;
+}
+
+static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int rc;
+
+ rc = crq->query_phys_parms_rsp.rc.code;
+ if (rc) {
+ netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
+ return rc;
+ }
+ switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) {
+ case IBMVNIC_10MBPS:
+ adapter->speed = SPEED_10;
+ break;
+ case IBMVNIC_100MBPS:
+ adapter->speed = SPEED_100;
+ break;
+ case IBMVNIC_1GBPS:
+ adapter->speed = SPEED_1000;
+ break;
+ case IBMVNIC_10GBP:
+ adapter->speed = SPEED_10000;
+ break;
+ case IBMVNIC_25GBPS:
+ adapter->speed = SPEED_25000;
+ break;
+ case IBMVNIC_40GBPS:
+ adapter->speed = SPEED_40000;
+ break;
+ case IBMVNIC_50GBPS:
+ adapter->speed = SPEED_50000;
+ break;
+ case IBMVNIC_100GBPS:
+ adapter->speed = SPEED_100000;
+ break;
+ default:
+ netdev_warn(netdev, "Unknown speed 0x%08x\n",
+ cpu_to_be32(crq->query_phys_parms_rsp.speed));
+ adapter->speed = SPEED_UNKNOWN;
+ }
+ if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
+ adapter->duplex = DUPLEX_FULL;
+ else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
+ adapter->duplex = DUPLEX_HALF;
+ else
+ adapter->duplex = DUPLEX_UNKNOWN;
+
+ return rc;
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -4445,6 +4512,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case GET_VPD_RSP:
handle_vpd_rsp(crq, adapter);
break;
+ case QUERY_PHYS_PARMS_RSP:
+ adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
+ complete(&adapter->fw_done);
+ break;
default:
netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
gen_crq->cmd);
@@ -4600,8 +4671,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
(unsigned long)adapter);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
- rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
- adapter);
+ snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
+ adapter->vdev->unit_address);
+ rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
if (rc) {
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
vdev->irq, rc);
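
ibmvnic now asks the firmware for the real link parameters: send_query_phys_parms() posts a CRQ command and blocks on a completion, and the CRQ handler stores the result in adapter->fw_done_rc before completing it. A rough userspace analogue of that request/complete handshake using a pthread condition variable; every name here is a stand-in, not driver code. Build with: cc query.c -pthread

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct fake_query {
        pthread_mutex_t lock;
        pthread_cond_t  done_cv;
        int             done;
        int             rc;       /* like adapter->fw_done_rc */
        unsigned int    speed;    /* like adapter->speed */
};

/* stands in for the CRQ interrupt path handling QUERY_PHYS_PARMS_RSP */
static void *crq_response_thread(void *arg)
{
        struct fake_query *q = arg;

        usleep(10000);                    /* pretend the firmware is working */
        pthread_mutex_lock(&q->lock);
        q->speed = 25000;                 /* e.g. SPEED_25000 */
        q->rc = 0;
        q->done = 1;
        pthread_cond_signal(&q->done_cv); /* complete(&adapter->fw_done) */
        pthread_mutex_unlock(&q->lock);
        return NULL;
}

int main(void)
{
        struct fake_query q = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .done_cv = PTHREAD_COND_INITIALIZER,
        };
        pthread_t tid;

        pthread_create(&tid, NULL, crq_response_thread, &q);

        pthread_mutex_lock(&q.lock);          /* wait_for_completion() */
        while (!q.done)
                pthread_cond_wait(&q.done_cv, &q.lock);
        pthread_mutex_unlock(&q.lock);
        pthread_join(tid, NULL);

        printf("query rc=%d speed=%u Mb/s\n", q.rc, q.speed);
        return 0;
}
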
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f2018dbebfa5..cffdac372a33 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -377,11 +377,16 @@ struct ibmvnic_phys_parms {
u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
__be32 speed;
-#define IBMVNIC_AUTONEG 0x80
-#define IBMVNIC_10MBPS 0x40
-#define IBMVNIC_100MBPS 0x20
-#define IBMVNIC_1GBPS 0x10
-#define IBMVNIC_10GBPS 0x08
+#define IBMVNIC_AUTONEG 0x80000000
+#define IBMVNIC_10MBPS 0x40000000
+#define IBMVNIC_100MBPS 0x20000000
+#define IBMVNIC_1GBPS 0x10000000
+#define IBMVNIC_10GBP 0x08000000
+#define IBMVNIC_40GBPS 0x04000000
+#define IBMVNIC_100GBPS 0x02000000
+#define IBMVNIC_25GBPS 0x01000000
+#define IBMVNIC_50GBPS 0x00800000
+#define IBMVNIC_200GBPS 0x00400000
__be32 mtu;
struct ibmvnic_rc rc;
} __packed __aligned(8);
@@ -850,6 +855,7 @@ struct ibmvnic_crq_queue {
dma_addr_t msg_token;
spinlock_t lock;
bool active;
+ char name[32];
};
union sub_crq {
@@ -876,6 +882,7 @@ struct ibmvnic_sub_crq_queue {
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
atomic_t used;
+ char name[32];
};
struct ibmvnic_long_term_buff {
@@ -999,6 +1006,9 @@ struct ibmvnic_adapter {
int phys_link_state;
int logical_link_state;
+ u32 speed;
+ u8 duplex;
+
/* login data */
struct ibmvnic_login_buffer *login_buf;
dma_addr_t login_buf_token;
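
The speed defines above are widened from 8-bit values to full 32-bit single-bit flags so they line up with the __be32 speed field, and new 40/100/25/50/200 Gb/s values are appended. A small sketch of decoding such a flag into Mb/s; the FAKE_ constants mirror the values shown above but are illustrative only.

#include <stdio.h>

#define FAKE_10MBPS   0x40000000u
#define FAKE_100MBPS  0x20000000u
#define FAKE_1GBPS    0x10000000u
#define FAKE_10GBPS   0x08000000u
#define FAKE_25GBPS   0x01000000u
#define FAKE_100GBPS  0x02000000u

static unsigned int speed_to_mbps(unsigned int flag)
{
        switch (flag) {
        case FAKE_10MBPS:   return 10;
        case FAKE_100MBPS:  return 100;
        case FAKE_1GBPS:    return 1000;
        case FAKE_10GBPS:   return 10000;
        case FAKE_25GBPS:   return 25000;
        case FAKE_100GBPS:  return 100000;
        default:            return 0;      /* unknown speed */
        }
}

int main(void)
{
        unsigned int flags[] = { FAKE_1GBPS, FAKE_25GBPS, 0x00000001u };
        size_t i;

        for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++)
                printf("0x%08x -> %u Mb/s\n", flags[i], speed_to_mbps(flags[i]));
        return 0;
}
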
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0fd268070fb4..a65d5a9ba7db 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2797,7 +2797,7 @@ static int e100_set_features(struct net_device *netdev,
netdev->features = features;
e100_exec_cb(nic, NULL, e100_configure);
- return 0;
+ return 1;
}
static const struct net_device_ops e100_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 466bf1ea186d..551de8c2fef2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -820,7 +820,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000_netdev_ops = {
@@ -3267,7 +3267,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
- if (!skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 022c3ac0e40f..0e09bede42a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5896,7 +5896,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
- if (!skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring,
@@ -6996,7 +6996,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000e_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -7343,7 +7343,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
- if (pci_dev_run_wake(pdev))
+ if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index cbf76a96e94e..90270b4a1682 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -280,7 +280,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
/* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
- pull_len = eth_get_headlen(va, FM10K_RX_HDR_LEN);
+ pull_len = eth_get_headlen(skb->dev, va, FM10K_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
@@ -1037,7 +1037,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 50590e8d1fd1..2f21b3e89fd0 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -21,6 +21,7 @@ i40e-objs := i40e_main.o \
i40e_diag.o \
i40e_txrx.o \
i40e_ptp.o \
+ i40e_ddp.o \
i40e_client.o \
i40e_virtchnl_pf.o \
i40e_xsk.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d3cc3427caad..7ce42040b851 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -149,6 +149,7 @@ enum i40e_state_t {
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
__I40E_VIRTCHNL_OP_PENDING,
+ __I40E_RECOVERY_MODE,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -321,6 +322,29 @@ struct i40e_udp_port_config {
u8 filter_index;
};
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+#define I40E_DDP_PROFILE_NAME_MAX 64
+
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add);
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
+struct i40e_ddp_profile_list {
+ u32 p_count;
+ struct i40e_profile_info p_info[0];
+};
+
+struct i40e_ddp_old_profile_list {
+ struct list_head list;
+ size_t old_ddp_size;
+ u8 old_ddp_buf[0];
+};
+
/* macros related to FLX_PIT */
#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
@@ -589,6 +613,8 @@ struct i40e_pf {
struct sk_buff *ptp_tx_skb;
unsigned long ptp_tx_start;
struct hwtstamp_config tstamp_config;
+ struct timespec64 ptp_prev_hw_time;
+ ktime_t ptp_reset_start;
struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */
u32 ptp_adj_mult;
u32 tx_hwtstamp_timeouts;
@@ -610,6 +636,8 @@ struct i40e_pf {
u16 override_q_count;
u16 last_sw_conf_flags;
u16 last_sw_conf_valid_flags;
+ /* List to keep previous DDP profiles to be rolled back in the future */
+ struct list_head ddp_old_prof;
};
/**
@@ -1083,6 +1111,8 @@ void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
void i40e_ptp_set_increment(struct i40e_pf *pf);
int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
+void i40e_ptp_save_hw_time(struct i40e_pf *pf);
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf);
void i40e_ptp_init(struct i40e_pf *pf);
void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
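
The new pf->ddp_old_prof list keeps previously applied DDP profiles so a later flash operation can roll them back, each entry carrying a copy of the old image in a flexible array member (old_ddp_buf). A minimal userspace sketch of such a list kept as a LIFO stack; the driver's actual insertion order is not visible in this hunk, so the ordering and names here are only illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct old_profile {
        struct old_profile *next;
        size_t size;
        unsigned char buf[];       /* copy of the profile image */
};

static struct old_profile *push_profile(struct old_profile *head,
                                        const void *data, size_t size)
{
        struct old_profile *e = malloc(sizeof(*e) + size);

        if (!e)
                return head;
        e->size = size;
        memcpy(e->buf, data, size);
        e->next = head;
        return e;
}

static struct old_profile *pop_profile(struct old_profile *head)
{
        struct old_profile *next = head ? head->next : NULL;

        if (head) {
                printf("rolling back %zu-byte profile \"%s\"\n",
                       head->size, (const char *)head->buf);
                free(head);
        }
        return next;
}

int main(void)
{
        struct old_profile *head = NULL;

        head = push_profile(head, "profile-A", sizeof("profile-A"));
        head = push_profile(head, "profile-B", sizeof("profile-B"));
        head = pop_profile(head);   /* B comes back first in this sketch */
        head = pop_profile(head);
        return 0;
}
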
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 7ab61f6ebb5f..243dcd4bec19 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -608,6 +608,11 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_min_ver >= 7))
hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 8))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+
if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
@@ -749,7 +754,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 11506102471c..6536023fa074 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,8 +11,8 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0006
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X722 0x0008
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -261,6 +261,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+ i40e_aqc_opc_lldp_restore = 0x0A0A,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -1887,6 +1888,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_25GBASE_AOC = 0x23,
I40E_PHY_TYPE_25GBASE_ACC = 0x24,
+ I40E_PHY_TYPE_2_5GBASE_T = 0x30,
+ I40E_PHY_TYPE_5GBASE_T = 0x31,
I40E_PHY_TYPE_MAX,
I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
@@ -1928,19 +1931,25 @@ enum i40e_aq_phy_type {
BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
- BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC) | \
+ BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_5GBASE_T))
+#define I40E_LINK_SPEED_2_5GB_SHIFT 0x0
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+#define I40E_LINK_SPEED_5GB_SHIFT 0x7
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_2_5GB = (1 << I40E_LINK_SPEED_2_5GB_SHIFT),
+ I40E_LINK_SPEED_5GB = (1 << I40E_LINK_SPEED_5GB_SHIFT),
I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
@@ -1986,6 +1995,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10
#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20
+#define I40E_AQ_PHY_TYPE_EXT_2_5GBASE_T 0x40
+#define I40E_AQ_PHY_TYPE_EXT_5GBASE_T 0x80
u8 fec_cfg_curr_mod_ext_info;
#define I40E_AQ_ENABLE_FEC_KR 0x01
#define I40E_AQ_ENABLE_FEC_RS 0x02
@@ -2498,18 +2509,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
-
struct i40e_aqc_lldp_start {
u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
u8 reserved[15];
};
@@ -2633,6 +2645,16 @@ struct i40e_aqc_lldp_stop_start_specific_agent {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
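
The new 2.5G/5G PHY types sit at indices 0x30 and 0x31, i.e. bits 48 and 49, which is why the capability mask above has to be built with BIT_ULL() rather than a 32-bit BIT(). A userspace sketch of testing such a 64-bit capability mask; FAKE_BIT_ULL() and the helper are stand-ins, while the index values are the ones visible in the enum above.

#include <stdbool.h>
#include <stdio.h>

#define FAKE_BIT_ULL(n)           (1ULL << (n))
#define FAKE_PHY_TYPE_25GBASE_AOC 0x23
#define FAKE_PHY_TYPE_25GBASE_ACC 0x24
#define FAKE_PHY_TYPE_2_5GBASE_T  0x30   /* bit 48: needs a 64-bit mask */
#define FAKE_PHY_TYPE_5GBASE_T    0x31

static bool phy_type_supported(unsigned long long caps, unsigned int phy_type)
{
        return caps & FAKE_BIT_ULL(phy_type);
}

int main(void)
{
        unsigned long long caps = FAKE_BIT_ULL(FAKE_PHY_TYPE_25GBASE_AOC) |
                                  FAKE_BIT_ULL(FAKE_PHY_TYPE_25GBASE_ACC) |
                                  FAKE_BIT_ULL(FAKE_PHY_TYPE_2_5GBASE_T) |
                                  FAKE_BIT_ULL(FAKE_PHY_TYPE_5GBASE_T);

        printf("2.5GBASE-T supported: %d\n",
               phy_type_supported(caps, FAKE_PHY_TYPE_2_5GBASE_T));
        printf("PHY type 0x10 supported: %d\n",
               phy_type_supported(caps, 0x10));
        return 0;
}
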
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 5f3b8b9ff511..e81530ca08d0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -578,11 +578,9 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
struct i40e_hw *hw = &pf->hw;
struct i40e_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
- u32 size;
- size = sizeof(struct i40e_qvlist_info) +
- (sizeof(struct i40e_qv_info) * (qvlist_info->num_vectors - 1));
- ldev->qvlist_info = kzalloc(size, GFP_KERNEL);
+ ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
+ qvlist_info->num_vectors - 1), GFP_KERNEL);
if (!ldev->qvlist_info)
return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
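
The i40e_client change replaces open-coded "header + n * element" size arithmetic with the kernel's struct_size() helper, which does the same computation with overflow checking; the driver's struct declares a one-element array, hence the num_vectors - 1 in the call. Below is a userspace approximation using a true flexible array member and a crude overflow check in the same spirit; the qv_info fields are simplified stand-ins, not the real layout.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct qv_info {
        uint32_t v_idx;
        uint32_t ceq_idx;
        uint32_t aeq_idx;
};

struct qvlist_info {
        uint32_t num_vectors;
        struct qv_info qv_info[];   /* flexible array member */
};

static size_t qvlist_size(size_t n)
{
        if (n > (SIZE_MAX - sizeof(struct qvlist_info)) / sizeof(struct qv_info))
                return SIZE_MAX;    /* overflow: let the allocation fail */
        return sizeof(struct qvlist_info) + n * sizeof(struct qv_info);
}

int main(void)
{
        size_t n = 4;
        struct qvlist_info *info = malloc(qvlist_size(n));

        if (!info)
                return 1;
        info->num_vectors = (uint32_t)n;
        printf("allocated %zu bytes for %zu vectors\n", qvlist_size(n), n);
        free(info);
        return 0;
}
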
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 97a9b1fb4763..ecb1adaa54ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -28,10 +28,14 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_C:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_B:
+ case I40E_DEV_ID_10G_SFP:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
+ case I40E_DEV_ID_X710_N3000:
+ case I40E_DEV_ID_XXV710_N3000:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -1149,6 +1153,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
break;
case I40E_PHY_TYPE_100BASE_TX:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
case I40E_PHY_TYPE_10GBASE_T:
media = I40E_MEDIA_TYPE_BASET;
break;
@@ -1466,7 +1472,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
**/
u32 i40e_led_get(struct i40e_hw *hw)
{
- u32 current_mode = 0;
u32 mode = 0;
int i;
@@ -1479,21 +1484,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
if (!gpio_val)
continue;
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
mode = (gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK) >>
I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT;
break;
@@ -1513,7 +1503,6 @@ u32 i40e_led_get(struct i40e_hw *hw)
**/
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
{
- u32 current_mode = 0;
int i;
if (mode & 0xfffffff0)
@@ -1527,22 +1516,6 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
if (!gpio_val)
continue;
-
- /* ignore gpio LED src mode entries related to the activity
- * LEDs
- */
- current_mode = ((gpio_val & I40E_GLGEN_GPIO_CTL_LED_MODE_MASK)
- >> I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT);
- switch (current_mode) {
- case I40E_COMBINED_ACTIVITY:
- case I40E_FILTER_ACTIVITY:
- case I40E_MAC_ACTIVITY:
- case I40E_LINK_ACTIVITY:
- continue;
- default:
- break;
- }
-
gpio_val &= ~I40E_GLGEN_GPIO_CTL_LED_MODE_MASK;
/* this & is a bit of paranoia, but serves as a range check */
gpio_val |= ((mode << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) &
@@ -3657,14 +3630,54 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
+ * i40e_aq_restore_lldp
+ * @hw: pointer to the hw struct
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Restores the LLDP Agent factory settings if @restore is set to True.
+ * Otherwise, it only returns the factory setting in the AQ response.
+ **/
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_lldp_restore *cmd =
+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
+ i40e_status status;
+
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Restore LLDP not supported by current FW version.\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
+
+ if (restore)
+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (setting)
+ *setting = cmd->command & 1;
+
+ return status;
+}
+
+/**
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent
**/
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -3677,6 +3690,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Stop LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -3686,13 +3707,14 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
* @buff: buffer for result
+ * @persist: True if start of LLDP should be persistent across power cycles
* @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
@@ -3702,6 +3724,15 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Start LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4873,6 +4904,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
break;
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
+ case I40E_DEV_ID_10G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T_X722:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
@@ -5448,6 +5480,163 @@ i40e_find_segment_in_package(u32 segment_type,
return NULL;
}
+/* Get section table in profile */
+#define I40E_SECTION_TABLE(profile, sec_tbl) \
+ do { \
+ struct i40e_profile_segment *p = (profile); \
+ u32 count; \
+ u32 *nvm; \
+ count = p->device_table_count; \
+ nvm = (u32 *)&p->device_table[count]; \
+ sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; \
+ } while (0)
+
+/* Get section header in profile */
+#define I40E_SECTION_HEADER(profile, offset) \
+ (struct i40e_profile_section_header *)((u8 *)(profile) + (offset))
+
+/**
+ * i40e_find_section_in_profile
+ * @section_type: the section type to search for (i.e., SECTION_TYPE_NOTE)
+ * @profile: pointer to the i40e segment header to be searched
+ *
+ * This function searches i40e segment for a particular section type. On
+ * success it returns a pointer to the section header, otherwise it will
+ * return NULL.
+ **/
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_section_table *sec_tbl;
+ u32 sec_off;
+ u32 i;
+
+ if (profile->header.type != SEGMENT_TYPE_I40E)
+ return NULL;
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (sec->section.type == section_type)
+ return sec;
+ }
+
+ return NULL;
+}
+
+/**
+ * i40e_ddp_exec_aq_section - Execute generic AQ for DDP
+ * @hw: pointer to the hw struct
+ * @aq: command buffer containing all data to execute AQ
+ **/
+static enum
+i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ u8 *msg = NULL;
+ u16 msglen;
+
+ i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
+ desc.flags |= cpu_to_le16(aq->flags);
+ memcpy(desc.params.raw, aq->param, sizeof(desc.params.raw));
+
+ msglen = aq->datalen;
+ if (msglen) {
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF |
+ I40E_AQ_FLAG_RD));
+ if (msglen > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(msglen);
+ msg = &aq->data[0];
+ }
+
+ status = i40e_asq_send_command(hw, &desc, msg, msglen, NULL);
+
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "unable to exec DDP AQ opcode %u, error %d\n",
+ aq->opcode, status);
+ return status;
+ }
+
+ /* copy returned desc to aq_buf */
+ memcpy(aq->param, desc.params.raw, sizeof(desc.params.raw));
+
+ return 0;
+}
+
+/**
+ * i40e_validate_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be validated
+ * @track_id: package tracking id
+ * @rollback: flag if the profile is for rollback.
+ *
+ * Validates supported devices and profile's sections.
+ */
+static enum i40e_status_code
+i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id, bool rollback)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ u32 vendor_dev_id;
+ u32 dev_cnt;
+ u32 sec_off;
+ u32 i;
+
+ if (track_id == I40E_DDP_TRACKID_INVALID) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE, "Invalid track_id\n");
+ return I40E_NOT_SUPPORTED;
+ }
+
+ dev_cnt = profile->device_table_count;
+ for (i = 0; i < dev_cnt; i++) {
+ vendor_dev_id = profile->device_table[i].vendor_dev_id;
+ if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL &&
+ hw->device_id == (vendor_dev_id & 0xFFFF))
+ break;
+ }
+ if (dev_cnt && i == dev_cnt) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Device doesn't support DDP\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+ }
+
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ /* Validate sections types */
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ if (rollback) {
+ if (sec->section.type == SECTION_TYPE_MMIO ||
+ sec->section.type == SECTION_TYPE_AQ ||
+ sec->section.type == SECTION_TYPE_RB_AQ) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not a roll-back package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ } else {
+ if (sec->section.type == SECTION_TYPE_RB_AQ ||
+ sec->section.type == SECTION_TYPE_RB_MMIO) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Not an original package\n");
+ return I40E_NOT_SUPPORTED;
+ }
+ }
+ }
+
+ return status;
+}
+
/**
* i40e_write_profile
* @hw: pointer to the hardware structure
@@ -5463,47 +5652,99 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
i40e_status status = 0;
struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
- u32 dev_cnt;
- u32 vendor_dev_id;
- u32 *nvm;
+ struct i40e_profile_aq_section *ddp_aq;
u32 section_size = 0;
u32 offset = 0, info = 0;
+ u32 sec_off;
u32 i;
- dev_cnt = profile->device_table_count;
+ status = i40e_validate_profile(hw, profile, track_id, false);
+ if (status)
+ return status;
- for (i = 0; i < dev_cnt; i++) {
- vendor_dev_id = profile->device_table[i].vendor_dev_id;
- if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL)
- if (hw->device_id == (vendor_dev_id & 0xFFFF))
+ I40E_SECTION_TABLE(profile, sec_tbl);
+
+ for (i = 0; i < sec_tbl->section_count; i++) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+ /* Process generic admin command */
+ if (sec->section.type == SECTION_TYPE_AQ) {
+ ddp_aq = (struct i40e_profile_aq_section *)&sec[1];
+ status = i40e_ddp_exec_aq_section(hw, ddp_aq);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to execute aq: section %d, opcode %u\n",
+ i, ddp_aq->opcode);
break;
+ }
+ sec->section.type = SECTION_TYPE_RB_AQ;
+ }
+
+ /* Skip any non-mmio sections */
+ if (sec->section.type != SECTION_TYPE_MMIO)
+ continue;
+
+ section_size = sec->section.size +
+ sizeof(struct i40e_profile_section_header);
+
+ /* Write MMIO section */
+ status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
+ track_id, &offset, &info, NULL);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_PACKAGE,
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
+ break;
+ }
}
- if (i == dev_cnt) {
- i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP");
- return I40E_ERR_DEVICE_NOT_SUPPORTED;
- }
+ return status;
+}
+
+/**
+ * i40e_rollback_profile
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package to be removed
+ * @track_id: package tracking id
+ *
+ * Rolls back previously loaded package.
+ */
+enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u32 track_id)
+{
+ struct i40e_profile_section_header *sec = NULL;
+ i40e_status status = 0;
+ struct i40e_section_table *sec_tbl;
+ u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ u32 sec_off;
+ int i;
- nvm = (u32 *)&profile->device_table[dev_cnt];
- sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1];
+ status = i40e_validate_profile(hw, profile, track_id, true);
+ if (status)
+ return status;
- for (i = 0; i < sec_tbl->section_count; i++) {
- sec = (struct i40e_profile_section_header *)((u8 *)profile +
- sec_tbl->section_offset[i]);
+ I40E_SECTION_TABLE(profile, sec_tbl);
- /* Skip 'AQ', 'note' and 'name' sections */
- if (sec->section.type != SECTION_TYPE_MMIO)
+ /* For rollback write sections in reverse */
+ for (i = sec_tbl->section_count - 1; i >= 0; i--) {
+ sec_off = sec_tbl->section_offset[i];
+ sec = I40E_SECTION_HEADER(profile, sec_off);
+
+ /* Skip any non-rollback sections */
+ if (sec->section.type != SECTION_TYPE_RB_MMIO)
continue;
section_size = sec->section.size +
sizeof(struct i40e_profile_section_header);
- /* Write profile */
+ /* Write roll-back MMIO section */
status = i40e_aq_write_ddp(hw, (void *)sec, (u16)section_size,
track_id, &offset, &info, NULL);
if (status) {
i40e_debug(hw, I40E_DEBUG_PACKAGE,
- "Failed to write profile: offset %d, info %d",
- offset, info);
+ "Failed to write profile: section %d, offset %d, info %d\n",
+ i, offset, info);
break;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 56bff8faf371..292eeb3def10 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -863,22 +863,23 @@ out:
/**
* i40e_init_dcb
* @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw)
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
if (!hw->func_caps.dcb)
- return ret;
+ return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
if (ret)
- return ret;
+ return I40E_ERR_NOT_READY;
/* Get the LLDP AdminStatus for the current port */
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -887,7 +888,7 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
/* LLDP agent disabled */
if (!adminstatus) {
hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
- return ret;
+ return I40E_ERR_NOT_READY;
}
/* Get DCBX status */
@@ -896,26 +897,19 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw)
return ret;
/* Check the DCBX Status */
- switch (hw->dcbx_status) {
- case I40E_DCBX_STATUS_DONE:
- case I40E_DCBX_STATUS_IN_PROGRESS:
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
if (ret)
return ret;
- break;
- case I40E_DCBX_STATUS_DISABLED:
- return ret;
- case I40E_DCBX_STATUS_NOT_STARTED:
- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
- default:
- break;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
}
/* Configure the LLDP MIB change event */
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
- if (ret)
- return ret;
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, true, NULL);
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2b748a60a843..ddb48ae7cce4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -124,5 +124,5 @@ i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw);
+i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
new file mode 100644
index 000000000000..5e08f100c413
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "i40e.h"
+
+#include <linux/firmware.h>
+
+/**
+ * i40e_ddp_profiles_eq - checks if two DDP profiles are equivalent
+ * @a: new profile info
+ * @b: old profile info
+ *
+ * Checks whether two DDP profiles are equivalent.
+ * Returns true if profiles are the same.
+ **/
+static bool i40e_ddp_profiles_eq(struct i40e_profile_info *a,
+ struct i40e_profile_info *b)
+{
+ return a->track_id == b->track_id &&
+ !memcmp(&a->version, &b->version, sizeof(a->version)) &&
+ !memcmp(&a->name, &b->name, I40E_DDP_NAME_SIZE);
+}
+
+/**
+ * i40e_ddp_does_profile_exist - checks if DDP profile loaded already
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile loaded already.
+ * Returns >0 if the profile exists.
+ * Returns 0 if the profile is absent.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ i40e_status status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -1;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_eq(pinfo, &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_ddp_profiles_overlap - checks if DDP profiles overlap.
+ * @new: new profile info
+ * @old: old profile info
+ *
+ * checks if DDP profiles overlap.
+ * Returns true if the profiles overlap.
+ **/
+static bool i40e_ddp_profiles_overlap(struct i40e_profile_info *new,
+ struct i40e_profile_info *old)
+{
+ unsigned int group_id_old = (u8)((old->track_id & 0x00FF0000) >> 16);
+ unsigned int group_id_new = (u8)((new->track_id & 0x00FF0000) >> 16);
+
+ /* 0x00 group must be only the first */
+ if (group_id_new == 0)
+ return true;
+ /* 0xFF group is compatible with anything else */
+ if (group_id_new == 0xFF || group_id_old == 0xFF)
+ return false;
+ /* otherwise only profiles from the same group are compatible*/
+ return group_id_old != group_id_new;
+}
+
+/**
+ * i40e_ddp_does_profile_overlap - checks if DDP overlaps with an existing one.
+ * @hw: HW data structure
+ * @pinfo: DDP profile information structure
+ *
+ * checks if DDP profile overlaps with existing one.
+ * Returns >0 if the profile overlaps.
+ * Returns 0 if the profile is ok.
+ * Returns <0 if error.
+ **/
+static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
+ struct i40e_profile_info *pinfo)
+{
+ struct i40e_ddp_profile_list *profile_list;
+ u8 buff[I40E_PROFILE_LIST_SIZE];
+ i40e_status status;
+ int i;
+
+ status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
+ NULL);
+ if (status)
+ return -EIO;
+
+ profile_list = (struct i40e_ddp_profile_list *)buff;
+ for (i = 0; i < profile_list->p_count; i++) {
+ if (i40e_ddp_profiles_overlap(pinfo,
+ &profile_list->p_info[i]))
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * i40e_add_pinfo
+ * @hw: pointer to the hardware structure
+ * @profile: pointer to the profile segment of the package
+ * @profile_info_sec: buffer for information section
+ * @track_id: package tracking id
+ *
+ * Register a profile to the list of loaded profiles.
+ */
+static enum i40e_status_code
+i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ i40e_status status;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_ADD_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_del_pinfo - delete DDP profile info from NIC
+ * @hw: HW data structure
+ * @profile: DDP profile segment to be deleted
+ * @profile_info_sec: DDP profile section header
+ * @track_id: track ID of the profile for deletion
+ *
+ * Removes DDP profile from the NIC.
+ **/
+static enum i40e_status_code
+i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
+ u8 *profile_info_sec, u32 track_id)
+{
+ struct i40e_profile_section_header *sec;
+ struct i40e_profile_info *pinfo;
+ i40e_status status;
+ u32 offset = 0, info = 0;
+
+ sec = (struct i40e_profile_section_header *)profile_info_sec;
+ sec->tbl_size = 1;
+ sec->data_end = sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info);
+ sec->section.type = SECTION_TYPE_INFO;
+ sec->section.offset = sizeof(struct i40e_profile_section_header);
+ sec->section.size = sizeof(struct i40e_profile_info);
+ pinfo = (struct i40e_profile_info *)(profile_info_sec +
+ sec->section.offset);
+ pinfo->track_id = track_id;
+ pinfo->version = profile->version;
+ pinfo->op = I40E_DDP_REMOVE_TRACKID;
+
+ /* Clear reserved field */
+ memset(pinfo->reserved, 0, sizeof(pinfo->reserved));
+ memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE);
+
+ status = i40e_aq_write_ddp(hw, (void *)sec, sec->data_end,
+ track_id, &offset, &info, NULL);
+ return status;
+}
+
+/**
+ * i40e_ddp_is_pkg_hdr_valid - performs basic pkg header integrity checks
+ * @netdev: net device structure (for logging purposes)
+ * @pkg_hdr: pointer to package header
+ * @size_huge: size of the whole DDP profile package in size_t
+ *
+ * Checks the correctness of the package header: version, size too big or
+ * too small, and the alignment and bounds of all segment offsets. This lets
+ * the driver reject a non-DDP file loaded by administrator mistake.
+ **/
+static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
+ struct i40e_package_header *pkg_hdr,
+ size_t size_huge)
+{
+ u32 size = 0xFFFFFFFFU & size_huge;
+ u32 pkg_hdr_size;
+ u32 segment;
+
+ if (!pkg_hdr)
+ return false;
+
+ if (pkg_hdr->version.major > 0) {
+ struct i40e_ddp_version ver = pkg_hdr->version;
+
+ netdev_err(netdev, "Unsupported DDP profile version %u.%u.%u.%u",
+ ver.major, ver.minor, ver.update, ver.draft);
+ return false;
+ }
+ if (size_huge > size) {
+ netdev_err(netdev, "Invalid DDP profile - size is bigger than 4G");
+ return false;
+ }
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP profile - size is too small.");
+ return false;
+ }
+
+ pkg_hdr_size = sizeof(u32) * (pkg_hdr->segment_count + 2U);
+ if (size < pkg_hdr_size) {
+ netdev_err(netdev, "Invalid DDP profile - too many segments");
+ return false;
+ }
+ for (segment = 0; segment < pkg_hdr->segment_count; ++segment) {
+ u32 offset = pkg_hdr->segment_offset[segment];
+
+ if (0xFU & offset) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment alignment",
+ segment);
+ return false;
+ }
+ if (pkg_hdr_size > offset || offset >= size) {
+ netdev_err(netdev,
+ "Invalid DDP profile %u segment offset",
+ segment);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * i40e_ddp_load - performs DDP loading
+ * @netdev: net device structure
+ * @data: buffer containing recipe file
+ * @size: size of the buffer
+ * @is_add: true when loading profile, false when rolling back the previous one
+ *
+ * Checks correctness and loads DDP profile to the NIC. The function is
+ * also used for rolling back previously loaded profile.
+ **/
+int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add)
+{
+ u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
+ sizeof(struct i40e_profile_info)];
+ struct i40e_metadata_segment *metadata_hdr;
+ struct i40e_profile_segment *profile_hdr;
+ struct i40e_profile_info pinfo;
+ struct i40e_package_header *pkg_hdr;
+ i40e_status status;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ u32 track_id;
+ int istatus;
+
+ pkg_hdr = (struct i40e_package_header *)data;
+ if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
+ return -EINVAL;
+
+ if (size < (sizeof(struct i40e_package_header) +
+ sizeof(struct i40e_metadata_segment) + sizeof(u32) * 2)) {
+ netdev_err(netdev, "Invalid DDP recipe size.");
+ return -EINVAL;
+ }
+
+ /* Find beginning of segment data in buffer */
+ metadata_hdr = (struct i40e_metadata_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (!metadata_hdr) {
+ netdev_err(netdev, "Failed to find metadata segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ track_id = metadata_hdr->track_id;
+ profile_hdr = (struct i40e_profile_segment *)
+ i40e_find_segment_in_package(SEGMENT_TYPE_I40E, pkg_hdr);
+ if (!profile_hdr) {
+ netdev_err(netdev, "Failed to find profile segment in DDP recipe.");
+ return -EINVAL;
+ }
+
+ pinfo.track_id = track_id;
+ pinfo.version = profile_hdr->version;
+ if (is_add)
+ pinfo.op = I40E_DDP_ADD_TRACKID;
+ else
+ pinfo.op = I40E_DDP_REMOVE_TRACKID;
+
+ memcpy(pinfo.name, profile_hdr->name, I40E_DDP_NAME_SIZE);
+
+ /* Check if profile data already exists */
+ istatus = i40e_ddp_does_profile_exist(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (is_add) {
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile already loaded.");
+ return -EINVAL;
+ }
+ istatus = i40e_ddp_does_profile_overlap(&pf->hw, &pinfo);
+ if (istatus < 0) {
+ netdev_err(netdev, "Failed to fetch loaded profiles.");
+ return istatus;
+ }
+ if (istatus > 0) {
+ netdev_err(netdev, "DDP profile overlaps with existing one.");
+ return -EINVAL;
+ }
+ } else {
+ if (istatus == 0) {
+ netdev_err(netdev,
+ "DDP profile for deletion does not exist.");
+ return -EINVAL;
+ }
+ }
+
+ /* Load profile data */
+ if (is_add) {
+ status = i40e_write_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ if (status == I40E_ERR_DEVICE_NOT_SUPPORTED) {
+ netdev_err(netdev,
+ "Profile is not supported by the device.");
+ return -EPERM;
+ }
+ netdev_err(netdev, "Failed to write DDP profile.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_rollback_profile(&pf->hw, profile_hdr, track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to remove DDP profile.");
+ return -EIO;
+ }
+ }
+
+ /* Add/remove profile to/from profile list in FW */
+ if (is_add) {
+ status = i40e_add_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to add DDP profile info.");
+ return -EIO;
+ }
+ } else {
+ status = i40e_del_pinfo(&pf->hw, profile_hdr, profile_info_sec,
+ track_id);
+ if (status) {
+ netdev_err(netdev, "Failed to restore DDP profile info.");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_ddp_restore - restore previously loaded profile and remove from list
+ * @pf: PF data struct
+ *
+ * Restores the most recently stored profile from the list kept in driver
+ * memory. After rolling it back, the entry is removed from the list.
+ **/
+static int i40e_ddp_restore(struct i40e_pf *pf)
+{
+ struct i40e_ddp_old_profile_list *entry;
+ struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ int status = 0;
+
+ if (!list_empty(&pf->ddp_old_prof)) {
+ entry = list_first_entry(&pf->ddp_old_prof,
+ struct i40e_ddp_old_profile_list,
+ list);
+ status = i40e_ddp_load(netdev, entry->old_ddp_buf,
+ entry->old_ddp_size, false);
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ return status;
+}
+
+/**
+ * i40e_ddp_flash - callback function for ethtool flash feature
+ * @netdev: net device structure
+ * @flash: kernel flash structure
+ *
+ * Ethtool callback function used for loading and unloading DDP profiles.
+ **/
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
+{
+ const struct firmware *ddp_config;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ int status = 0;
+
+ /* Check for valid region first */
+ if (flash->region != I40_DDP_FLASH_REGION) {
+ netdev_err(netdev, "Requested firmware region is not recognized by this driver.");
+ return -EINVAL;
+ }
+ if (pf->hw.bus.func != 0) {
+ netdev_err(netdev, "Any DDP operation is allowed only on Phy0 NIC interface");
+ return -EINVAL;
+ }
+
+ /* If the user supplied "-" instead of a file name, roll back the
+ * previously stored profile.
+ */
+ if (strncmp(flash->data, "-", 2) != 0) {
+ struct i40e_ddp_old_profile_list *list_entry;
+ char profile_name[sizeof(I40E_DDP_PROFILE_PATH)
+ + I40E_DDP_PROFILE_NAME_MAX];
+
+ profile_name[sizeof(profile_name) - 1] = 0;
+ strncpy(profile_name, I40E_DDP_PROFILE_PATH,
+ sizeof(profile_name) - 1);
+ strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX);
+ /* Load DDP recipe. */
+ status = request_firmware(&ddp_config, profile_name,
+ &netdev->dev);
+ if (status) {
+ netdev_err(netdev, "DDP recipe file request failed.");
+ return status;
+ }
+
+ status = i40e_ddp_load(netdev, ddp_config->data,
+ ddp_config->size, true);
+
+ if (!status) {
+ list_entry =
+ kzalloc(sizeof(struct i40e_ddp_old_profile_list) +
+ ddp_config->size, GFP_KERNEL);
+ if (!list_entry) {
+ netdev_info(netdev, "Failed to allocate memory for previous DDP profile data.");
+ netdev_info(netdev, "New profile loaded but roll-back will be impossible.");
+ } else {
+ memcpy(list_entry->old_ddp_buf,
+ ddp_config->data, ddp_config->size);
+ list_entry->old_ddp_size = ddp_config->size;
+ list_add(&list_entry->list, &pf->ddp_old_prof);
+ }
+ }
+
+ release_firmware(ddp_config);
+ } else {
+ if (!list_empty(&pf->ddp_old_prof)) {
+ status = i40e_ddp_restore(pf);
+ } else {
+ netdev_warn(netdev, "There is no DDP profile to restore.");
+ status = -ENOENT;
+ }
+ }
+ return status;
+}
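The flash callback above is reached from userspace through ethtool's generic
flash interface. The sketch below is illustrative only: the interface name,
file name and region value are assumptions, the driver accepts only its own
I40_DDP_FLASH_REGION value, the file name is resolved against
I40E_DDP_PROFILE_PATH by request_firmware(), and a file name of "-" requests a
rollback of the previously loaded profile.

	/* Hypothetical userspace helper driving ETHTOOL_FLASHDEV (sketch only). */
	#include <string.h>
	#include <unistd.h>
	#include <net/if.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int flash_ddp_profile(const char *ifname, const char *file, __u32 region)
	{
		struct ethtool_flash efl = { .cmd = ETHTOOL_FLASHDEV, .region = region };
		struct ifreq ifr;
		int fd, ret;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(efl.data, file, sizeof(efl.data) - 1);	/* "-" asks for a rollback */
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&efl;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* lands in i40e_ddp_flash() */
		close(fd);
		return ret;
	}

The same request is normally issued with the ethtool utility's -f/--flash
option, passing the DDP flash region number as the extra argument.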
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index c67d485d6f99..7ea4f09229e4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -1321,7 +1321,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
- ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
+ ret = i40e_aq_stop_lldp(&pf->hw, false, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Stop LLDP AQ command failed =0x%x\n",
@@ -1358,7 +1358,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
/* Continue and start FW LLDP anyways */
}
- ret = i40e_aq_start_lldp(&pf->hw, NULL);
+ ret = i40e_aq_start_lldp(&pf->hw, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"Start LLDP AQ command failed =0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 334b05ff685a..bac4da031f9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -5,6 +5,8 @@
#define _I40E_DEVIDS_H_
/* Device IDs */
+#define I40E_DEV_ID_X710_N3000 0x0CF8
+#define I40E_DEV_ID_XXV710_N3000 0x0D58
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_B 0x1580
@@ -18,6 +20,9 @@
#define I40E_DEV_ID_10G_BASE_T4 0x1589
#define I40E_DEV_ID_25G_B 0x158A
#define I40E_DEV_ID_25G_SFP28 0x158B
+#define I40E_DEV_ID_10G_BASE_T_BC 0x15FF
+#define I40E_DEV_ID_10G_B 0x104F
+#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
#define I40E_DEV_ID_SFP_X722 0x37D0
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 7874d0ec7fb0..7545b21bee3c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -508,6 +508,20 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
}
+ if (phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_5GBASE_T) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ }
if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
@@ -535,17 +549,23 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
}
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
+ }
if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
+ 40000baseKR4_Full);
ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseLR4_Full);
+ 40000baseKR4_Full);
}
if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
@@ -668,13 +688,15 @@ static void i40e_phy_type_to_ethtool(struct i40e_pf *pf,
phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR ||
phy_types & I40E_CAP_PHY_TYPE_25GBASE_CR ||
phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4 ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_5GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_2_5GBASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
@@ -720,14 +742,20 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_40GBASE_AOC:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseCR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseCR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_SR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseSR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseSR4_Full);
break;
case I40E_PHY_TYPE_40GBASE_LR4:
ethtool_link_ksettings_add_link_mode(ks, supported,
40000baseLR4_Full);
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 40000baseLR4_Full);
break;
case I40E_PHY_TYPE_25GBASE_SR:
case I40E_PHY_TYPE_25GBASE_LR:
@@ -778,12 +806,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
10000baseT_Full);
break;
case I40E_PHY_TYPE_10GBASE_T:
+ case I40E_PHY_TYPE_5GBASE_T:
+ case I40E_PHY_TYPE_2_5GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
case I40E_PHY_TYPE_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported,
10000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
+ 5000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 2500baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ks, supported,
1000baseT_Full);
ethtool_link_ksettings_add_link_mode(ks, supported,
100baseT_Full);
@@ -791,6 +825,12 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 5000baseT_Full);
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB)
+ ethtool_link_ksettings_add_link_mode(ks, advertising,
+ 2500baseT_Full);
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
1000baseT_Full);
@@ -946,6 +986,12 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_LINK_SPEED_10GB:
ks->base.speed = SPEED_10000;
break;
+ case I40E_LINK_SPEED_5GB:
+ ks->base.speed = SPEED_5000;
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ ks->base.speed = SPEED_2500;
+ break;
case I40E_LINK_SPEED_1GB:
ks->base.speed = SPEED_1000;
break;
@@ -1033,6 +1079,7 @@ static int i40e_get_link_ksettings(struct net_device *netdev,
break;
case I40E_MEDIA_TYPE_FIBER:
ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
ks->base.port = PORT_FIBRE;
break;
case I40E_MEDIA_TYPE_UNKNOWN:
@@ -1231,6 +1278,12 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
10000baseLR_Full))
config.link_speed |= I40E_LINK_SPEED_10GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_2_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 5000baseT_Full))
+ config.link_speed |= I40E_LINK_SPEED_5GB;
+ if (ethtool_link_ksettings_test_link_mode(ks, advertising,
20000baseKR2_Full))
config.link_speed |= I40E_LINK_SPEED_20GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -4945,7 +4998,7 @@ flags_complete:
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
- i40e_aq_stop_lldp(&pf->hw, true, NULL);
+ i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
/* reset local_dcbx_config to default */
dcbcfg = &pf->hw.local_dcbx_config;
@@ -4960,7 +5013,7 @@ flags_complete:
dcbcfg->pfc.willing = 1;
dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
- i40e_aq_start_lldp(&pf->hw, NULL);
+ i40e_aq_start_lldp(&pf->hw, false, NULL);
}
}
@@ -5128,6 +5181,12 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
return 0;
}
+static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
+ .set_eeprom = i40e_set_eeprom,
+ .get_eeprom_len = i40e_get_eeprom_len,
+ .get_eeprom = i40e_get_eeprom,
+};
+
static const struct ethtool_ops i40e_ethtool_ops = {
.get_drvinfo = i40e_get_drvinfo,
.get_regs_len = i40e_get_regs_len,
@@ -5171,9 +5230,16 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_link_ksettings = i40e_set_link_ksettings,
.get_fecparam = i40e_get_fec_param,
.set_fecparam = i40e_set_fec_param,
+ .flash_device = i40e_ddp_flash,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- netdev->ethtool_ops = &i40e_ethtool_ops;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ netdev->ethtool_ops = &i40e_ethtool_ops;
+ else
+ netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b1c265012c8a..320562b39686 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -27,7 +27,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 8
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 20
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -46,6 +46,10 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
+static int i40e_restore_interrupt_scheme(struct i40e_pf *pf);
+static bool i40e_check_recovery_mode(struct i40e_pf *pf);
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
@@ -69,6 +73,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_SFP), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
@@ -77,6 +83,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X710_N3000), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_XXV710_N3000), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
/* required last entry */
@@ -278,8 +286,9 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
**/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ if ((!test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state))
queue_work(i40e_wq, &pf->service_task);
}
@@ -2107,11 +2116,22 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
- set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
- dev_warn(&vsi->back->pdev->dev,
- "Error %s adding RX filters on %s, promiscuous mode forced on\n",
- i40e_aq_str(hw, aq_err),
- vsi_name);
+ if (vsi->type == I40E_VSI_MAIN) {
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err), vsi_name);
+ } else if (vsi->type == I40E_VSI_SRIOV ||
+ vsi->type == I40E_VSI_VMDQ1 ||
+ vsi->type == I40E_VSI_VMDQ2) {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, please set promiscuous on manually for %s\n",
+ i40e_aq_str(hw, aq_err), vsi_name, vsi_name);
+ } else {
+ dev_warn(&vsi->back->pdev->dev,
+ "Error %s adding RX filters on %s, incorrect VSI type: %i.\n",
+ i40e_aq_str(hw, aq_err), vsi_name, vsi->type);
+ }
}
}
@@ -2654,6 +2674,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
@@ -2684,6 +2708,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
struct i40e_vsi_context ctxt;
i40e_status ret;
+ /* Don't modify stripping options if a port VLAN is active */
+ if (vsi->info.pvid)
+ return;
+
if ((vsi->info.valid_sections &
cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
@@ -2949,9 +2977,9 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
**/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
- i40e_vlan_stripping_disable(vsi);
-
vsi->info.pvid = 0;
+
+ i40e_vlan_stripping_disable(vsi);
}
/**
@@ -4000,7 +4028,8 @@ static irqreturn_t i40e_intr(int irq, void *data)
enable_intr:
/* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) ||
+ test_bit(__I40E_RECOVERY_MODE, pf->state)) {
i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf);
}
@@ -6403,7 +6432,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
goto out;
/* Get the initial DCB configuration */
- err = i40e_init_dcb(hw);
+ err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
if ((!hw->func_caps.dcb) ||
@@ -6493,6 +6522,12 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
case I40E_LINK_SPEED_10GB:
speed = "10 G";
break;
+ case I40E_LINK_SPEED_5GB:
+ speed = "5 G";
+ break;
+ case I40E_LINK_SPEED_2_5GB:
+ speed = "2.5 G";
+ break;
case I40E_LINK_SPEED_1GB:
speed = "1000 M";
break;
@@ -6846,10 +6881,12 @@ static int i40e_setup_tc(struct net_device *netdev, void *type_data)
struct i40e_pf *pf = vsi->back;
u8 enabled_tc = 0, num_tc, hw;
bool need_reset = false;
+ int old_queue_pairs;
int ret = -EINVAL;
u16 mode;
int i;
+ old_queue_pairs = vsi->num_queue_pairs;
num_tc = mqprio_qopt->qopt.num_tc;
hw = mqprio_qopt->qopt.hw;
mode = mqprio_qopt->mode;
@@ -6950,6 +6987,7 @@ config_tc:
}
ret = i40e_configure_queue_channels(vsi);
if (ret) {
+ vsi->num_queue_pairs = old_queue_pairs;
netdev_info(netdev,
"Failed configuring queue channels\n");
need_reset = true;
@@ -9290,6 +9328,11 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
dev_warn(&pf->pdev->dev,
"shutdown_lan_hmc failed: %d\n", ret);
}
+
+ /* Save the current PTP time so that we can restore the time after the
+ * reset completes.
+ */
+ i40e_ptp_save_hw_time(pf);
}
/**
@@ -9382,6 +9425,7 @@ static int i40e_reset(struct i40e_pf *pf)
**/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
u8 set_fc_aq_fail = 0;
@@ -9389,7 +9433,14 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
u32 val;
int v;
- if (test_bit(__I40E_DOWN, pf->state))
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+ i40e_check_recovery_mode(pf)) {
+ i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ }
+
+ if (test_bit(__I40E_DOWN, pf->state) &&
+ !test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !old_recovery_mode_bit)
goto clear_recovery;
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -9418,6 +9469,44 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
+ /* if we are going out of or into recovery mode we have to act
+ * accordingly with regard to resource initialization
+ * and deinitialization
+ */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) ||
+ old_recovery_mode_bit) {
+ if (i40e_get_capabilities(pf,
+ i40e_aqc_opc_list_func_capabilities))
+ goto end_unlock;
+
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ /* we're staying in recovery mode so we'll reinitialize
+ * misc vector here
+ */
+ if (i40e_setup_misc_vector_for_recovery_mode(pf))
+ goto end_unlock;
+ } else {
+ if (!lock_acquired)
+ rtnl_lock();
+ /* we're going out of recovery mode so we'll free
+ * the IRQ allocated specifically for recovery mode
+ * and restore the interrupt scheme
+ */
+ free_irq(pf->pdev->irq, pf);
+ i40e_clear_interrupt_scheme(pf);
+ if (i40e_restore_interrupt_scheme(pf))
+ goto end_unlock;
+ }
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* bail out in case recovery mode was detected, as there is
+ * no need for further configuration.
+ */
+ goto end_unlock;
+ }
+
i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (ret)
@@ -9669,7 +9758,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
bool mdd_detected = false;
- bool pf_mdd_detected = false;
struct i40e_vf *vf;
u32 reg;
int i;
@@ -9715,19 +9803,12 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_PF_MDET_TX);
if (reg & I40E_PF_MDET_TX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
- dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
+ dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
}
reg = rd32(hw, I40E_PF_MDET_RX);
if (reg & I40E_PF_MDET_RX_VALID_MASK) {
wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
- dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
- pf_mdd_detected = true;
- }
- /* Queue belongs to the PF, initiate a reset */
- if (pf_mdd_detected) {
- set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
- i40e_service_event_schedule(pf);
+ dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
}
}
@@ -9740,6 +9821,9 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
+ dev_info(&pf->pdev->dev,
+ "Use PF Control I/F to re-enable the VF\n");
+ set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
}
reg = rd32(hw, I40E_VP_MDET_RX(i));
@@ -9748,11 +9832,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
- }
-
- if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
- dev_info(&pf->pdev->dev,
- "Too many MDD events on VF %d, disabled\n", i);
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
@@ -9879,31 +9958,38 @@ static void i40e_service_task(struct work_struct *work)
unsigned long start_time = jiffies;
/* don't bother with service tasks if a reset is in progress */
- if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
+ test_bit(__I40E_SUSPENDED, pf->state))
return;
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
return;
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
- i40e_sync_filters_subtask(pf);
- i40e_reset_subtask(pf);
- i40e_handle_mdd_event(pf);
- i40e_vc_process_vflr_event(pf);
- i40e_watchdog_subtask(pf);
- i40e_fdir_reinit_subtask(pf);
- if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
- /* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_sync_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ i40e_handle_mdd_event(pf);
+ i40e_vc_process_vflr_event(pf);
+ i40e_watchdog_subtask(pf);
+ i40e_fdir_reinit_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
+ /* Client subtask will reopen next time through. */
+ i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
+ true);
+ } else {
+ i40e_client_subtask(pf);
+ if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
+ pf->state))
+ i40e_notify_client_of_l2_param_changes(
+ pf->vsi[pf->lan_vsi]);
+ }
+ i40e_sync_filters_subtask(pf);
+ i40e_sync_udp_filters_subtask(pf);
} else {
- i40e_client_subtask(pf);
- if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
- pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
- }
- i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
+ i40e_reset_subtask(pf);
+ }
+
i40e_clean_adminq_subtask(pf);
/* flush memory to make sure state is correct before next watchdog */
@@ -10726,6 +10812,48 @@ err_unwind:
}
/**
+ * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
+ * non queue events in recovery mode
+ * @pf: board private structure
+ *
+ * This sets up the handler for MSIX 0 or MSI/legacy, which is used to manage
+ * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
+ * This is handled differently than in normal mode since no Tx/Rx resources
+ * are being allocated.
+ **/
+static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf)
+{
+ int err;
+
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ err = i40e_setup_misc_vector(pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI-X misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ } else {
+ u32 flags = pf->flags & I40E_FLAG_MSI_ENABLED ? 0 : IRQF_SHARED;
+
+ err = request_irq(pf->pdev->irq, i40e_intr, flags,
+ pf->int_name, pf);
+
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "MSI/legacy misc vector request failed, error %d\n",
+ err);
+ return err;
+ }
+ i40e_enable_misc_int_causes(pf);
+ i40e_irq_dynamic_enable_icr0(pf);
+ }
+
+ return 0;
+}
+
+/**
* i40e_setup_misc_vector - Setup the misc vector to handle non queue events
* @pf: board private structure
*
@@ -13888,6 +14016,125 @@ void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
}
/**
+ * i40e_check_recovery_mode - check if we are running transition firmware
+ * @pf: board private structure
+ *
+ * Check registers indicating the firmware runs in recovery mode. Sets the
+ * appropriate driver state.
+ *
+ * Returns true if the recovery mode was detected, false otherwise
+ **/
+static bool i40e_check_recovery_mode(struct i40e_pf *pf)
+{
+ u32 val = rd32(&pf->hw, I40E_GL_FWSTS);
+
+ if (val & I40E_GL_FWSTS_FWS1B_MASK) {
+ dev_notice(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n");
+ dev_notice(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ set_bit(__I40E_RECOVERY_MODE, pf->state);
+
+ return true;
+ }
+ if (test_and_clear_bit(__I40E_RECOVERY_MODE, pf->state))
+ dev_info(&pf->pdev->dev, "Reinitializing in normal mode with full functionality.\n");
+
+ return false;
+}
+
+/**
+ * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
+ * @pf: board private structure
+ * @hw: ptr to the hardware info
+ *
+ * This function does a minimal setup of all subsystems needed for running
+ * recovery mode.
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw)
+{
+ struct i40e_vsi *vsi;
+ int err;
+ int v_idx;
+
+ pci_save_state(pf->pdev);
+
+ /* set up periodic task facility */
+ timer_setup(&pf->service_timer, i40e_service_timer, 0);
+ pf->service_timer_period = HZ;
+
+ INIT_WORK(&pf->service_task, i40e_service_task);
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+
+ err = i40e_init_interrupt_scheme(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
+ */
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the vsi struct and our local tracking of the MAIN PF vsi. */
+ pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
+ GFP_KERNEL);
+ if (!pf->vsi) {
+ err = -ENOMEM;
+ goto err_switch_setup;
+ }
+
+ /* We allocate one VSI, which is the absolute minimum needed
+ * in order to register the netdev
+ */
+ v_idx = i40e_vsi_mem_alloc(pf, I40E_VSI_MAIN);
+ if (v_idx < 0)
+ goto err_switch_setup;
+ pf->lan_vsi = v_idx;
+ vsi = pf->vsi[v_idx];
+ if (!vsi)
+ goto err_switch_setup;
+ vsi->alloc_queue_pairs = 1;
+ err = i40e_config_netdev(vsi);
+ if (err)
+ goto err_switch_setup;
+ err = register_netdev(vsi->netdev);
+ if (err)
+ goto err_switch_setup;
+ vsi->netdev_registered = true;
+ i40e_dbg_pf_init(pf);
+
+ err = i40e_setup_misc_vector_for_recovery_mode(pf);
+ if (err)
+ goto err_switch_setup;
+
+ /* tell the firmware that we're starting */
+ i40e_send_version(pf);
+
+ /* since everything's happy, start the service_task timer */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+
+err_switch_setup:
+ i40e_reset_interrupt_capability(pf);
+ del_timer_sync(&pf->service_timer);
+ i40e_shutdown_adminq(hw);
+ iounmap(hw->hw_addr);
+ pci_disable_pcie_error_reporting(pf->pdev);
+ pci_release_mem_regions(pf->pdev);
+ pci_disable_device(pf->pdev);
+ kfree(pf);
+
+ return err;
+}
+
+/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in i40e_pci_tbl
@@ -13984,6 +14231,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&pf->l3_flex_pit_list);
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
+ INIT_LIST_HEAD(&pf->ddp_old_prof);
/* set up the locks for the AQ, do this only once in probe
* and destroy them only once in remove
@@ -14011,13 +14259,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Reset here to make sure all is clean and to define PF 'n' */
i40e_clear_hw(hw);
- err = i40e_pf_reset(hw);
- if (err) {
- dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
- goto err_pf_reset;
+ if (!i40e_check_recovery_mode(pf)) {
+ err = i40e_pf_reset(hw);
+ if (err) {
+ dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
+ goto err_pf_reset;
+ }
+ pf->pfr_count++;
}
- pf->pfr_count++;
-
hw->aq.num_arq_entries = I40E_AQ_LEN;
hw->aq.num_asq_entries = I40E_AQ_LEN;
hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
@@ -14042,7 +14291,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
dev_info(&pdev->dev,
- "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+ "The driver for the device stopped because the NVM image v%u.%u is newer than expected v%u.%u. You must install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
else
dev_info(&pdev->dev,
"The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
@@ -14051,19 +14304,28 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
i40e_get_oem_version(hw);
- /* provide nvm, fw, api versions */
- dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
+ /* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
+ dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
hw->aq.api_maj_ver, hw->aq.api_min_ver,
- i40e_nvm_version_str(hw));
+ i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
+ hw->subsystem_vendor_id, hw->subsystem_device_id);
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
dev_info(&pdev->dev,
- "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+ "The driver for the device detected a newer version of the NVM image v%u.%u than expected v%u.%u. Please install the most recent version of the network driver.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
dev_info(&pdev->dev,
- "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+ "The driver for the device detected an older version of the NVM image v%u.%u than expected v%u.%u. Please update the NVM image.\n",
+ hw->aq.api_maj_ver,
+ hw->aq.api_min_ver,
+ I40E_FW_API_VERSION_MAJOR,
+ I40E_FW_MINOR_VERSION(hw));
i40e_verify_eeprom(pf);
@@ -14072,6 +14334,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
i40e_clear_pxe_mode(hw);
+
err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
if (err)
goto err_adminq_setup;
@@ -14082,6 +14345,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_sw_init;
}
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state))
+ return i40e_init_recovery_mode(pf, hw);
+
err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
if (err) {
@@ -14102,7 +14368,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
- i40e_aq_stop_lldp(hw, true, NULL);
+ i40e_aq_stop_lldp(hw, true, false, NULL);
}
/* allow a platform config to override the HW addr */
@@ -14467,6 +14733,19 @@ static void i40e_remove(struct pci_dev *pdev)
if (pf->service_task.func)
cancel_work_sync(&pf->service_task);
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
+ struct i40e_vsi *vsi = pf->vsi[0];
+
+ /* We know that we have allocated only one VSI for this PF; it was
+ * allocated just to register a netdevice, so that the interface
+ * is visible in the 'ifconfig' output
+ */
+ unregister_netdev(vsi->netdev);
+ free_netdev(vsi->netdev);
+
+ goto unmap;
+ }
+
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
@@ -14516,6 +14795,12 @@ static void i40e_remove(struct pci_dev *pdev)
ret_code);
}
+unmap:
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
/* shutdown the adminq */
i40e_shutdown_adminq(hw);
@@ -14528,7 +14813,8 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
- i40e_vsi_clear_rings(pf->vsi[i]);
+ if (!test_bit(__I40E_RECOVERY_MODE, pf->state))
+ i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
pf->vsi[i] = NULL;
}
@@ -14736,6 +15022,11 @@ static void i40e_shutdown(struct pci_dev *pdev)
wr32(hw, I40E_PFPM_WUFC,
(pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ /* Free MSI/legacy interrupt 0 when in recovery mode. */
+ if (test_bit(__I40E_RECOVERY_MODE, pf->state) &&
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED))
+ free_irq(pf->pdev->irq, pf);
+
/* Since we're going to destroy queues during the
* i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
* whole section
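Taken together, the recovery-mode hunks in i40e_main.c form the following flow;
this is a condensed reading of the code above, not additional behaviour:

	/*
	 * probe:    i40e_check_recovery_mode() reads I40E_GL_FWSTS; if the FWS1B
	 *           field is non-zero, __I40E_RECOVERY_MODE is set and
	 *           i40e_init_recovery_mode() registers a single netdev plus the
	 *           misc interrupt only (no Tx/Rx resources, no switch setup).
	 * service:  while the bit is set, i40e_service_task() runs only the reset
	 *           subtask and the admin-queue cleanup.
	 * rebuild:  i40e_rebuild() re-evaluates the mode after an EMP reset and
	 *           either re-requests the recovery-mode misc vector or frees it
	 *           and restores the full interrupt scheme when leaving recovery.
	 * remove/shutdown: only the single netdev and, for MSI/legacy, the misc
	 *           IRQ are released.
	 */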
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 0299e5bbb902..c508b75c3c09 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -578,11 +578,10 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
__le16 le_sum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
- if (!ret_code) {
- le_sum = cpu_to_le16(checksum);
+ le_sum = cpu_to_le16(checksum);
+ if (!ret_code)
ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
1, &le_sum, true);
- }
return ret_code;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index e08d754824b1..882627073dce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -203,14 +203,18 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
@@ -429,10 +433,16 @@ i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
+struct i40e_profile_section_header *
+i40e_find_section_in_profile(u32 section_type,
+ struct i40e_profile_segment *profile);
enum i40e_status_code
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
enum i40e_status_code
+i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
+ u32 track_id);
+enum i40e_status_code
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
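Every call site updated by this patch passes the new persist arguments as
false. The sketch below is a hedged illustration of the distinction the extra
flag expresses; the persistent form is an assumption tied to the
I40E_HW_FLAG_FW_LLDP_PERSISTENT capability bit added in i40e_type.h further
down, and is not exercised anywhere in this diff:

	i40e_status status;

	/* Stop the FW LLDP agent until the next reset only (what this patch does). */
	status = i40e_aq_stop_lldp(hw, true, false, NULL);

	/* Assumed persistent form: the setting is also retained across power
	 * cycles, provided the firmware advertises support for it.
	 */
	if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
		status = i40e_aq_stop_lldp(hw, true, true, NULL);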
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 31575c0bb884..439c35f0c581 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -725,16 +725,68 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ /* Set the previous "reset" time to the current Kernel clock time */
+ pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
+ pf->ptp_reset_start = ktime_get();
+
return 0;
}
/**
+ * i40e_ptp_save_hw_time - Save the current PTP time as ptp_prev_hw_time
+ * @pf: Board private structure
+ *
+ * Read the current PTP time and save it into pf->ptp_prev_hw_time. This should
+ * be called at the end of preparing to reset, just before hardware reset
+ * occurs, in order to preserve the PTP time as close as possible across
+ * resets.
+ */
+void i40e_ptp_save_hw_time(struct i40e_pf *pf)
+{
+ /* don't try to access the PTP clock if it's not enabled */
+ if (!(pf->flags & I40E_FLAG_PTP))
+ return;
+
+ i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
+ /* Get a monotonic starting time for this reset */
+ pf->ptp_reset_start = ktime_get();
+}
+
+/**
+ * i40e_ptp_restore_hw_time - Restore the ptp_prev_hw_time + delta to PTP regs
+ * @pf: Board private structure
+ *
+ * Restore the PTP hardware clock registers. We previously cached the PTP
+ * hardware time as pf->ptp_prev_hw_time. To be as accurate as possible,
+ * update this value based on the time delta since the time was saved, using
+ * CLOCK_MONOTONIC (via ktime_get()) to calculate the time difference.
+ *
+ * This ensures that the hardware clock is restored to nearly what it should
+ * have been if a reset had not occurred.
+ */
+void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
+{
+ ktime_t delta = ktime_sub(ktime_get(), pf->ptp_reset_start);
+
+ /* Update the previous HW time with the ktime delta */
+ timespec64_add_ns(&pf->ptp_prev_hw_time, ktime_to_ns(delta));
+
+ /* Restore the hardware clock registers */
+ i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);
+}
+
+/**
* i40e_ptp_init - Initialize the 1588 support after device probe or reset
* @pf: Board private structure
*
* This function sets device up for 1588 support. The first time it is run, it
* will create a PHC clock device. It does not create a clock device if one
* already exists. It also reconfigures the device after a reset.
+ *
+ * The first time a clock is created, i40e_ptp_create_clock will set
+ * pf->ptp_prev_hw_time to the current system time. During resets, it is
+ * expected that this timespec will be set to the last known PTP clock time,
+ * in order to preserve the clock time as close as possible across a reset.
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
@@ -766,7 +818,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
__func__);
} else if (pf->ptp_clock) {
- struct timespec64 ts;
u32 regval;
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
@@ -787,9 +838,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
/* reset timestamping mode */
i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
- /* Set the clock value. */
- ts = ktime_to_timespec64(ktime_get_real());
- i40e_ptp_settime(&pf->ptp_caps, &ts);
+ /* Restore the clock time based on last known value */
+ i40e_ptp_restore_hw_time(pf);
}
}
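For reference, these two helpers are hooked into the reset path elsewhere in
this patch: i40e_prep_for_reset() calls i40e_ptp_save_hw_time() just before the
hardware goes down, and i40e_ptp_init() calls i40e_ptp_restore_hw_time() once
it is back. Condensing the arithmetic using the fields introduced here:

	/* before the reset: cache the PHC time plus a monotonic reference */
	i40e_ptp_gettimex(&pf->ptp_caps, &pf->ptp_prev_hw_time, NULL);
	pf->ptp_reset_start = ktime_get();

	/* after the reset: advance the cached time by however long the reset
	 * took, then write it back, so the PHC loses almost no time
	 */
	timespec64_add_ns(&pf->ptp_prev_hw_time,
			  ktime_to_ns(ktime_sub(ktime_get(), pf->ptp_reset_start)));
	i40e_ptp_settime(&pf->ptp_caps, &pf->ptp_prev_hw_time);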
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index ffb611bbedfa..20a283702c9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2035,7 +2035,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > I40E_RX_HDR_SIZE)
- headlen = eth_get_headlen(xdp->data, I40E_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data,
+ I40E_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), xdp->data,
@@ -3469,7 +3470,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 2781ab91ca82..8f43aa47c263 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -252,6 +252,12 @@ struct i40e_phy_info {
I40E_PHY_TYPE_OFFSET)
#define I40E_CAP_PHY_TYPE_25GBASE_ACC BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC + \
I40E_PHY_TYPE_OFFSET)
+/* Offset for 2.5G/5G PHY type values to bit number conversion */
+#define I40E_PHY_TYPE_OFFSET2 (-10)
+#define I40E_CAP_PHY_TYPE_2_5GBASE_T BIT_ULL(I40E_PHY_TYPE_2_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
+#define I40E_CAP_PHY_TYPE_5GBASE_T BIT_ULL(I40E_PHY_TYPE_5GBASE_T + \
+ I40E_PHY_TYPE_OFFSET2)
#define I40E_HW_CAP_MAX_GPIO 30
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
@@ -616,6 +622,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
u64 flags;
/* Used in set switch config AQ command */
@@ -1527,6 +1534,8 @@ struct i40e_generic_seg_header {
struct i40e_metadata_segment {
struct i40e_generic_seg_header header;
struct i40e_ddp_version version;
+#define I40E_DDP_TRACKID_RDONLY 0
+#define I40E_DDP_TRACKID_INVALID 0xFFFFFFFF
u32 track_id;
char name[I40E_DDP_NAME_SIZE];
};
@@ -1555,15 +1564,36 @@ struct i40e_profile_section_header {
struct {
#define SECTION_TYPE_INFO 0x00000010
#define SECTION_TYPE_MMIO 0x00000800
+#define SECTION_TYPE_RB_MMIO 0x00001800
#define SECTION_TYPE_AQ 0x00000801
+#define SECTION_TYPE_RB_AQ 0x00001801
#define SECTION_TYPE_NOTE 0x80000000
#define SECTION_TYPE_NAME 0x80000001
+#define SECTION_TYPE_PROTO 0x80000002
+#define SECTION_TYPE_PCTYPE 0x80000003
+#define SECTION_TYPE_PTYPE 0x80000004
u32 type;
u32 offset;
u32 size;
} section;
};
+struct i40e_profile_tlv_section_record {
+ u8 rtype;
+ u8 type;
+ u16 len;
+ u8 data[12];
+};
+
+/* Generic AQ section in profile */
+struct i40e_profile_aq_section {
+ u16 opcode;
+ u16 flags;
+ u8 param[16];
+ u16 datalen;
+ u8 data[1];
+};
+
struct i40e_profile_info {
u32 track_id;
struct i40e_ddp_version version;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 831d52bc3c9a..479bc60c8f71 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -181,7 +181,7 @@ static inline bool i40e_vc_isvalid_vsi_id(struct i40e_vf *vf, u16 vsi_id)
* check for the valid queue id
**/
static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
- u8 qid)
+ u16 qid)
{
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id);
@@ -196,7 +196,7 @@ static inline bool i40e_vc_isvalid_queue_id(struct i40e_vf *vf, u16 vsi_id,
*
* check for the valid vector id
**/
-static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u8 vector_id)
+static inline bool i40e_vc_isvalid_vector_id(struct i40e_vf *vf, u32 vector_id)
{
struct i40e_pf *pf = vf->pf;
@@ -441,14 +441,28 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf, size;
+ int ret = 0;
+
+ msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
+
+ if (qvlist_info->num_vectors > msix_vf) {
+ dev_warn(&pf->pdev->dev,
+ "Incorrect number of iwarp vectors %u. Maximum %u allowed.\n",
+ qvlist_info->num_vectors,
+ msix_vf);
+ ret = -EINVAL;
+ goto err_out;
+ }
size = sizeof(struct virtchnl_iwarp_qvlist_info) +
(sizeof(struct virtchnl_iwarp_qv_info) *
(qvlist_info->num_vectors - 1));
+ kfree(vf->qvlist_info);
vf->qvlist_info = kzalloc(size, GFP_KERNEL);
- if (!vf->qvlist_info)
- return -ENOMEM;
-
+ if (!vf->qvlist_info) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
vf->qvlist_info->num_vectors = qvlist_info->num_vectors;
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
@@ -459,8 +473,10 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
v_idx = qv_info->v_idx;
/* Validate vector id belongs to this vf */
- if (!i40e_vc_isvalid_vector_id(vf, v_idx))
- goto err;
+ if (!i40e_vc_isvalid_vector_id(vf, v_idx)) {
+ ret = -EINVAL;
+ goto err_free;
+ }
vf->qvlist_info->qv_info[i] = *qv_info;
@@ -502,10 +518,11 @@ static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
}
return 0;
-err:
+err_free:
kfree(vf->qvlist_info);
vf->qvlist_info = NULL;
- return -EINVAL;
+err_out:
+ return ret;
}
/**
@@ -1112,15 +1129,6 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
- (allmulti || alluni)) {
- dev_err(&pf->pdev->dev,
- "Unprivileged VF %d is attempting to configure promiscuous mode\n",
- vf->vf_id);
- /* Lie to the VF on purpose. */
- return 0;
- }
-
if (vf->port_vlan_id) {
aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
allmulti,
@@ -1997,8 +2005,31 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
bool allmulti = false;
bool alluni = false;
- if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states))
- return I40E_ERR_PARAM;
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+
+ /* Lie to the VF on purpose, because this is an error we can
+ * ignore. Unprivileged VF is not a virtual channel error.
+ */
+ aq_ret = 0;
+ goto err_out;
+ }
+
+ if (info->flags > I40E_MAX_VF_PROMISC_FLAGS) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
+
+ if (!i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err_out;
+ }
/* Multicast promiscuous handling*/
if (info->flags & FLAG_VF_MULTICAST_PROMISC)
@@ -2032,7 +2063,7 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
clear_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states);
}
}
-
+err_out:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
@@ -2054,17 +2085,16 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
u16 vsi_id, vsi_queue_id = 0;
+ u16 num_qps_all = 0;
i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
- vsi_id = qci->vsi_id;
-
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2074,10 +2104,27 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (vf->adq_enabled) {
+ for (i = 0; i < I40E_MAX_VF_VSI; i++)
+ num_qps_all += vf->ch[i].num_qps;
+ if (num_qps_all != qci->num_queue_pairs) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ vsi_id = qci->vsi_id;
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (!vf->adq_enabled) {
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
+ qpi->txq.queue_id)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
vsi_queue_id = qpi->txq.queue_id;
if (qpi->txq.vsi_id != qci->vsi_id ||
@@ -2088,10 +2135,8 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
}
}
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (vf->adq_enabled)
+ vsi_id = vf->ch[idx].vsi_id;
if (i40e_config_vsi_rx_queue(vf, vsi_id, vsi_queue_id,
&qpi->rxq) ||
@@ -2115,7 +2160,6 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
j++;
vsi_queue_id++;
}
- vsi_id = vf->ch[idx].vsi_id;
}
}
/* set vsi num_queue_pairs in use to num configured by VF */
@@ -2174,7 +2218,7 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
- u16 vsi_id, vector_id;
+ u16 vsi_id;
i40e_status aq_ret = 0;
int i;
@@ -2183,16 +2227,21 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
+ if (irqmap_info->num_vectors >
+ vf->pf->hw.func_caps.num_msix_vectors_vf) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+
for (i = 0; i < irqmap_info->num_vectors; i++) {
map = &irqmap_info->vecmap[i];
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
/* validate msg params */
- if (!i40e_vc_isvalid_vector_id(vf, vector_id) ||
- !i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
+ if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) ||
+ !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ vsi_id = map->vsi_id;
if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
aq_ret = I40E_ERR_PARAM;
@@ -2340,7 +2389,9 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if ((0 == vqs->rx_queues) && (0 == vqs->tx_queues)) {
+ if ((vqs->rx_queues == 0 && vqs->tx_queues == 0) ||
+ vqs->rx_queues > I40E_MAX_VF_QUEUES ||
+ vqs->tx_queues > I40E_MAX_VF_QUEUES) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2454,8 +2505,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
-/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
-#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can
+ * program. MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
+ */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
#define I40E_VC_MAX_VLAN_PER_VF 8
/**
@@ -2764,7 +2817,8 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
vsi = pf->vsi[vf->lan_vsi_idx];
if (vsi->info.pvid) {
- aq_ret = I40E_ERR_PARAM;
+ if (vfl->num_elements > 1 || vfl->vlan_id[0])
+ aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3126,7 +3180,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
if (mask.dst_port & data.dst_port) {
- if (!data.dst_port || be16_to_cpu(data.dst_port) > 0xFFFF) {
+ if (!data.dst_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
vf->vf_id);
goto err;
@@ -3134,7 +3188,7 @@ static int i40e_validate_cloud_filter(struct i40e_vf *vf,
}
if (mask.src_port & data.src_port) {
- if (!data.src_port || be16_to_cpu(data.src_port) > 0xFFFF) {
+ if (!data.src_port) {
dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
vf->vf_id);
goto err;
@@ -3374,7 +3428,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (!vf->adq_enabled) {
@@ -3382,7 +3436,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: ADq is not enabled, can't apply cloud filter\n",
vf->vf_id);
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
if (i40e_validate_cloud_filter(vf, vcf)) {
@@ -3390,7 +3444,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: Invalid input/s, can't apply cloud filter\n",
vf->vf_id);
aq_ret = I40E_ERR_PARAM;
- goto err;
+ goto err_out;
}
cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
@@ -3451,13 +3505,17 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
"VF %d: Failed to add cloud filter, err %s aq_err %s\n",
vf->vf_id, i40e_stat_str(&pf->hw, ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
- goto err;
+ goto err_free;
}
INIT_HLIST_NODE(&cfilter->cloud_node);
hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+ /* release the local pointer; the filter now belongs to the list and
+ * must not be freed on the error path
+ */
+ cfilter = NULL;
vf->num_cloud_filters++;
-err:
+err_free:
+ kfree(cfilter);
+err_out:
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
aq_ret);
}
@@ -4009,6 +4067,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
{
u16 vlanprio = vlan_id | (qos << I40E_VLAN_PRIORITY_SHIFT);
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ bool allmulti = false, alluni = false;
struct i40e_pf *pf = np->vsi->back;
struct i40e_vsi *vsi;
struct i40e_vf *vf;
@@ -4093,6 +4152,15 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ /* disable promisc modes in case they were enabled */
+ ret = i40e_config_vf_promiscuous_mode(vf, vf->lan_vsi_id,
+ allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config VF promiscuous mode\n");
+ goto error_pvid;
+ }
+
if (vlan_id || qos)
ret = i40e_vsi_add_pvid(vsi, vlanprio);
else
@@ -4119,6 +4187,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ if (test_bit(I40E_VF_STATE_UC_PROMISC, &vf->vf_states))
+ alluni = true;
+
+ if (test_bit(I40E_VF_STATE_MC_PROMISC, &vf->vf_states))
+ allmulti = true;
+
/* Schedule the worker thread to take care of applying changes */
i40e_service_event_schedule(vsi->back);
@@ -4131,6 +4205,13 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
* default LAN MAC address.
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+
+ ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
+ goto error_pvid;
+ }
+
ret = 0;
error_pvid:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index f9621026beef..f65cc0c16550 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -17,6 +17,8 @@
#define I40E_VLAN_MASK 0xFFF
#define I40E_PRIORITY_MASK 0xE000
+#define I40E_MAX_VF_PROMISC_FLAGS 3
+
/* Various queue ctrls */
enum i40e_queue_ctrl {
I40E_QUEUE_CTRL_UNKNOWN = 0,
diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
index af4f94a6541e..e5ae4a1c0cff 100644
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
@@ -14,7 +14,7 @@
#define I40E_FW_API_VERSION_MAJOR 0x0001
#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 6bfef82e7607..06d1509d57f7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1315,7 +1315,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IAVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(va, IAVF_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -2358,7 +2358,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
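
The Tx path stops peeking at the per-skb xmit_more flag and asks the stack through netdev_xmit_more() instead; the doorbell-batching decision itself is unchanged, i.e. the tail register is only written when the queue has been stopped or no further packet is queued behind this one. A small userspace model of that decision (tx_map, queue_stopped and doorbell_writes are stand-ins, not driver code):

#include <stdbool.h>
#include <stdio.h>

static unsigned int doorbell_writes;    /* stands in for writel(i, tx_ring->tail) */

static void tx_map(unsigned int tail, bool queue_stopped, bool xmit_more)
{
        /* Mirror of the driver condition: flush when stopped, or when the
         * stack says no more packets are coming in this batch.
         */
        if (queue_stopped || !xmit_more) {
                doorbell_writes++;
                printf("doorbell -> tail=%u\n", tail);
        }
}

int main(void)
{
        /* A burst of four packets: only the last one rings the doorbell. */
        tx_map(1, false, true);
        tx_map(2, false, true);
        tx_map(3, false, true);
        tx_map(4, false, false);
        printf("MMIO writes for 4 packets: %u\n", doorbell_writes);
        return 0;
}
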
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index e5d6f684437e..2d140ba83781 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -17,3 +17,4 @@ ice-y := ice_main.o \
ice_txrx.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
+ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 89440775aea1..792e6e42030e 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
+#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
@@ -42,10 +43,21 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
-#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
+/* set default number of Rx/Tx descriptors to the minimum between
+ * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+ */
+#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(struct ice_tx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
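
The new defaults above are derived rather than hard-coded: one page worth of descriptors, rounded up to the 32-descriptor multiple the hardware requires and capped at ICE_MAX_NUM_DESC. Assuming 4 KiB pages, a 32-byte Rx descriptor and a 16-byte Tx descriptor (the sizes the structure names suggest), that works out to 128 Rx and 256 Tx descriptors. A quick standalone check of the arithmetic:

#include <stdio.h>

#define PAGE_SZ                 4096u   /* assumption: 4 KiB pages */
#define MAX_NUM_DESC            8160u
#define REQ_DESC_MULTIPLE       32u

#define ALIGN_UP(x, a)          (((x) + (a) - 1) / (a) * (a))
#define MIN(a, b)               ((a) < (b) ? (a) : (b))

int main(void)
{
        unsigned int rx = MIN(MAX_NUM_DESC,
                              ALIGN_UP(PAGE_SZ / 32u, REQ_DESC_MULTIPLE));
        unsigned int tx = MIN(MAX_NUM_DESC,
                              ALIGN_UP(PAGE_SZ / 16u, REQ_DESC_MULTIPLE));

        printf("default rx desc: %u, tx desc: %u\n", rx, tx);  /* 128, 256 */
        return 0;
}
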
@@ -71,6 +83,8 @@ extern const char ice_drv_ver[];
#define ICE_MAX_QS_PER_VF 256
#define ICE_MIN_QS_PER_VF 1
#define ICE_DFLT_QS_PER_VF 4
+#define ICE_NONQ_VECS_VF 1
+#define ICE_MAX_SCATTER_QS_PER_VF 16
#define ICE_MAX_BASE_QS_PER_VF 16
#define ICE_MAX_INTR_PER_VF 65
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
@@ -114,6 +128,23 @@ extern const char ice_drv_ver[];
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
+ ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
+#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -123,7 +154,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* TX map */
+ u8 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -134,7 +165,7 @@ struct ice_res_tracker {
};
struct ice_qs_cfg {
- struct mutex *qs_mutex; /* will be assgined to &pf->avail_q_mutex */
+ struct mutex *qs_mutex; /* will be assigned to &pf->avail_q_mutex */
unsigned long *pf_map;
unsigned long pf_map_size;
unsigned int q_count;
@@ -224,6 +255,8 @@ struct ice_vsi {
s16 vf_id; /* VF ID for SR-IOV VSIs */
+ u16 ethtype; /* Ethernet protocol for pause frame */
+
/* RSS config */
u16 rss_table_size; /* HW RSS table size */
u16 rss_size; /* Allocated RSS queues */
@@ -247,6 +280,7 @@ struct ice_vsi {
u8 irqs_ready;
u8 current_isup; /* Sync 'link up' logging */
u8 stat_offsets_loaded;
+ u8 vlan_ena;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -257,26 +291,34 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
- u16 num_desc;
+ u16 num_rx_desc;
+ u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
struct ice_vsi *vsi;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- struct ice_ring_container rx;
- struct ice_ring_container tx;
- struct irq_affinity_notify affinity_notify;
+
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of Tx rings in vector */
+ u16 reg_idx;
u8 num_ring_rx; /* total number of Rx rings in vector */
- char name[ICE_INT_NAME_STR_LEN];
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
*/
u8 intrl;
+
+ struct napi_struct napi;
+
+ struct ice_ring_container rx;
+ struct ice_ring_container tx;
+
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
+ char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -285,7 +327,11 @@ enum ice_pf_flags {
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
+ ICE_FLAG_DCB_CAPABLE,
+ ICE_FLAG_DCB_ENA,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
+ ICE_FLAG_DISABLE_FW_LLDP,
+ ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -324,8 +370,8 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
- u16 num_lan_tx; /* num lan Tx queues setup */
- u16 num_lan_rx; /* num lan Rx queues setup */
+ u16 num_lan_tx; /* num LAN Tx queues setup */
+ u16 num_lan_rx; /* num LAN Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -339,6 +385,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
+#ifdef CONFIG_DCB
+ u16 dcbx_cap;
+#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@@ -351,14 +400,15 @@ struct ice_netdev_priv {
/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
- * @hw: pointer to hw struct
- * @vsi: pointer to vsi struct, can be NULL
+ * @hw: pointer to HW struct
+ * @vsi: pointer to VSI struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
-static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
- struct ice_q_vector *q_vector)
+static inline void
+ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
+ struct ice_q_vector *q_vector)
{
- u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
+ u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
((struct ice_pf *)hw->back)->hw_oicr_idx;
int itr = ICE_ITR_NONE;
u32 val;
@@ -374,10 +424,24 @@ static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
-static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+/**
+ * ice_find_vsi_by_type - Find and return VSI of a given type
+ * @pf: PF to search for VSI
+ * @type: Value indicating type of VSI we are looking for
+ */
+static inline struct ice_vsi *
+ice_find_vsi_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
- vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
- vsi->tc_cfg.numtc = 1;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (vsi && vsi->type == type)
+ return vsi;
+ }
+
+ return NULL;
}
void ice_set_ethtool_ops(struct net_device *netdev);
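
ice_find_vsi_by_type() above is a plain linear scan of pf->vsi[]: the first VSI whose type matches is returned, empty slots are skipped, and NULL means no VSI of that type exists yet. A small userspace model of the same lookup (the enum values and table here are made up):

#include <stdio.h>

enum vsi_type { VSI_PF, VSI_VF, VSI_LB };

struct vsi { enum vsi_type type; int idx; };

static struct vsi *find_vsi_by_type(struct vsi **arr, int n, enum vsi_type t)
{
        for (int i = 0; i < n; i++)
                if (arr[i] && arr[i]->type == t)        /* skip empty slots */
                        return arr[i];
        return NULL;
}

int main(void)
{
        struct vsi a = { VSI_PF, 0 }, b = { VSI_VF, 1 };
        struct vsi *tbl[] = { &a, NULL, &b };
        struct vsi *v = find_vsi_by_type(tbl, 3, VSI_VF);

        printf("found idx %d\n", v ? v->idx : -1);
        return 0;
}
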
@@ -388,5 +452,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
+#endif /* CONFIG_DCB */
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 242c78469181..6ef083002f5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -62,7 +62,7 @@ struct ice_aqc_req_res {
#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin id of the SDP */
+ /* For SDP: pin ID of the SDP */
__le32 res_number;
/* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
__le16 status;
@@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
__le32 teid[1];
};
+/* Query Port ETS (indirect 0x040E)
+ *
+ * This indirect command is used to query port TC node configuration.
+ */
+struct ice_aqc_query_port_ets {
+ __le32 port_teid;
+ __le32 reserved;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_port_ets_elem {
+ u8 tc_valid_bits;
+ u8 reserved[3];
+ /* 3 bits for UP per TC 0-7, 4th byte reserved */
+ __le32 up2tc;
+ u8 tc_bw_share[8];
+ __le32 port_eir_prof_id;
+ __le32 port_cir_prof_id;
+ /* 3 bits per Node priority to TC 0-7, 4th byte reserved */
+ __le32 tc_node_prio;
+#define ICE_TC_NODE_PRIO_S 0x4
+ u8 reserved1[4];
+ __le32 tc_node_teid[8]; /* Used for response, reserved in command */
+};
+
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -953,8 +979,9 @@ struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
-#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
-#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
@@ -1023,7 +1050,7 @@ struct ice_aqc_get_link_status_data {
u8 ext_info;
#define ICE_AQ_LINK_PHY_TEMP_ALARM BIT(0)
#define ICE_AQ_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
- /* Port TX Suspended */
+ /* Port Tx Suspended */
#define ICE_AQ_LINK_TX_S 2
#define ICE_AQ_LINK_TX_M (0x03 << ICE_AQ_LINK_TX_S)
#define ICE_AQ_LINK_TX_ACTIVE 0
@@ -1119,9 +1146,9 @@ struct ice_aqc_nvm {
};
/**
- * Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to PF command (indirect 0x0801) ID is only used by PF
*
- * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to VF command (indirect 0x0802) ID is only used by PF
*
*/
struct ice_aqc_pf_vf_msg {
@@ -1131,6 +1158,126 @@ struct ice_aqc_pf_vf_msg {
__le32 addr_low;
};
+/* Get LLDP MIB (indirect 0x0A00)
+ * Note: This is also used by the LLDP MIB Change Event (0x0A01)
+ * as the format is the same.
+ */
+struct ice_aqc_lldp_get_mib {
+ u8 type;
+#define ICE_AQ_LLDP_MIB_TYPE_S 0
+#define ICE_AQ_LLDP_MIB_TYPE_M (0x3 << ICE_AQ_LLDP_MIB_TYPE_S)
+#define ICE_AQ_LLDP_MIB_LOCAL 0
+#define ICE_AQ_LLDP_MIB_REMOTE 1
+#define ICE_AQ_LLDP_MIB_LOCAL_AND_REMOTE 2
+#define ICE_AQ_LLDP_BRID_TYPE_S 2
+#define ICE_AQ_LLDP_BRID_TYPE_M (0x3 << ICE_AQ_LLDP_BRID_TYPE_S)
+#define ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID 0
+#define ICE_AQ_LLDP_BRID_TYPE_NON_TPMR 1
+/* Tx pause flags in the 0xA01 event use ICE_AQ_LLDP_TX_* */
+#define ICE_AQ_LLDP_TX_S 0x4
+#define ICE_AQ_LLDP_TX_M (0x03 << ICE_AQ_LLDP_TX_S)
+#define ICE_AQ_LLDP_TX_ACTIVE 0
+#define ICE_AQ_LLDP_TX_SUSPENDED 1
+#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
+ * and in the LLDP MIB Change Event (0x0A01). They are valid for the
+ * Get LLDP MIB (0x0A00) response only.
+ */
+ u8 reserved1;
+ __le16 local_len;
+ __le16 remote_len;
+ u8 reserved2[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Configure LLDP MIB Change Event (direct 0x0A01) */
+/* For MIB Change Event use ice_aqc_lldp_get_mib structure above */
+struct ice_aqc_lldp_set_mib_change {
+ u8 command;
+#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
+#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+ u8 reserved[15];
+};
+
+/* Stop LLDP (direct 0x0A05) */
+struct ice_aqc_lldp_stop {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_STATE_MASK BIT(0)
+#define ICE_AQ_LLDP_AGENT_STOP 0x0
+#define ICE_AQ_LLDP_AGENT_SHUTDOWN ICE_AQ_LLDP_AGENT_STATE_MASK
+#define ICE_AQ_LLDP_AGENT_PERSIST_DIS BIT(1)
+ u8 reserved[15];
+};
+
+/* Start LLDP (direct 0x0A06) */
+struct ice_aqc_lldp_start {
+ u8 command;
+#define ICE_AQ_LLDP_AGENT_START BIT(0)
+#define ICE_AQ_LLDP_AGENT_PERSIST_ENA BIT(1)
+ u8 reserved[15];
+};
+
+/* Get CEE DCBX Oper Config (0x0A07)
+ * The command uses the generic descriptor struct and
+ * returns the struct below as an indirect response.
+ */
+struct ice_aqc_get_cee_dcb_cfg_resp {
+ u8 oper_num_tc;
+ u8 oper_prio_tc[4];
+ u8 oper_tc_bw[8];
+ u8 oper_pfc_en;
+ __le16 oper_app_prio;
+#define ICE_AQC_CEE_APP_FCOE_S 0
+#define ICE_AQC_CEE_APP_FCOE_M (0x7 << ICE_AQC_CEE_APP_FCOE_S)
+#define ICE_AQC_CEE_APP_ISCSI_S 3
+#define ICE_AQC_CEE_APP_ISCSI_M (0x7 << ICE_AQC_CEE_APP_ISCSI_S)
+#define ICE_AQC_CEE_APP_FIP_S 8
+#define ICE_AQC_CEE_APP_FIP_M (0x7 << ICE_AQC_CEE_APP_FIP_S)
+ __le32 tlv_status;
+#define ICE_AQC_CEE_PG_STATUS_S 0
+#define ICE_AQC_CEE_PG_STATUS_M (0x7 << ICE_AQC_CEE_PG_STATUS_S)
+#define ICE_AQC_CEE_PFC_STATUS_S 3
+#define ICE_AQC_CEE_PFC_STATUS_M (0x7 << ICE_AQC_CEE_PFC_STATUS_S)
+#define ICE_AQC_CEE_FCOE_STATUS_S 8
+#define ICE_AQC_CEE_FCOE_STATUS_M (0x7 << ICE_AQC_CEE_FCOE_STATUS_S)
+#define ICE_AQC_CEE_ISCSI_STATUS_S 11
+#define ICE_AQC_CEE_ISCSI_STATUS_M (0x7 << ICE_AQC_CEE_ISCSI_STATUS_S)
+#define ICE_AQC_CEE_FIP_STATUS_S 16
+#define ICE_AQC_CEE_FIP_STATUS_M (0x7 << ICE_AQC_CEE_FIP_STATUS_S)
+ u8 reserved[12];
+};
+
+/* Set Local LLDP MIB (indirect 0x0A08)
+ * Used to replace the local MIB of a given LLDP agent. e.g. DCBx
+ */
+struct ice_aqc_lldp_set_local_mib {
+ u8 type;
+#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
+#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
+#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
+#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
+#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
+ u8 reserved0;
+ __le16 length;
+ u8 reserved1[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Stop/Start LLDP Agent (direct 0x0A09)
+ * Used for stopping/starting specific LLDP agent. e.g. DCBx.
+ * The same structure is used for the response, with the command field
+ * being used as the status field.
+ */
+struct ice_aqc_lldp_stop_start_specific_agent {
+ u8 command;
+#define ICE_AQC_START_STOP_AGENT_M BIT(0)
+#define ICE_AQC_START_STOP_AGENT_STOP_DCBX 0
+#define ICE_AQC_START_STOP_AGENT_START_DCBX ICE_AQC_START_STOP_AGENT_M
+ u8 reserved[15];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1144,6 +1291,9 @@ struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE 0x28
#define ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE 0xC
+#define ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE \
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE + \
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)
struct ice_aqc_get_set_rss_keys {
u8 standard_rss_key[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
@@ -1185,7 +1335,7 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
-/* Add TX LAN Queues (indirect 0x0C30) */
+/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
u8 reserved[3];
@@ -1194,7 +1344,7 @@ struct ice_aqc_add_txqs {
__le32 addr_low;
};
-/* This is the descriptor of each queue entry for the Add TX LAN Queues
+/* This is the descriptor of each queue entry for the Add Tx LAN Queues
* command (0x0C30). Only used within struct ice_aqc_add_tx_qgrp.
*/
struct ice_aqc_add_txqs_perq {
@@ -1206,7 +1356,7 @@ struct ice_aqc_add_txqs_perq {
struct ice_aqc_txsched_elem info;
};
-/* The format of the command buffer for Add TX LAN Queues (0x0C30)
+/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
* each struct ice_aqc_add_tx_qgrp is variable due
* to the variable number of queues in each group!
@@ -1218,7 +1368,7 @@ struct ice_aqc_add_tx_qgrp {
struct ice_aqc_add_txqs_perq txqs[1];
};
-/* Disable TX LAN Queues (indirect 0x0C31) */
+/* Disable Tx LAN Queues (indirect 0x0C31) */
struct ice_aqc_dis_txqs {
u8 cmd_type;
#define ICE_AQC_Q_DIS_CMD_S 0
@@ -1240,7 +1390,7 @@ struct ice_aqc_dis_txqs {
__le32 addr_low;
};
-/* The buffer for Disable TX LAN Queues (indirect 0x0C31)
+/* The buffer for Disable Tx LAN Queues (indirect 0x0C31)
* contains the following structures, arrayed one after the
* other.
* Note: Since the q_id is 16 bits wide, if the
@@ -1387,8 +1537,15 @@ struct ice_aq_desc {
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
+ struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_lldp_get_mib lldp_get_mib;
+ struct ice_aqc_lldp_set_mib_change lldp_set_event;
+ struct ice_aqc_lldp_stop lldp_stop;
+ struct ice_aqc_lldp_start lldp_start;
+ struct ice_aqc_lldp_set_local_mib lldp_set_mib;
+ struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1421,6 +1578,8 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* Success */
+ ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
+ ICE_AQ_RC_ENOENT = 2, /* No such element */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
@@ -1473,6 +1632,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
+ ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_query_sched_res = 0x0412,
@@ -1490,6 +1650,14 @@ enum ice_adminq_opc {
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
ice_mbx_opc_send_msg_to_vf = 0x0802,
+ /* LLDP commands */
+ ice_aqc_opc_lldp_get_mib = 0x0A00,
+ ice_aqc_opc_lldp_set_mib_change = 0x0A01,
+ ice_aqc_opc_lldp_stop = 0x0A05,
+ ice_aqc_opc_lldp_start = 0x0A06,
+ ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
+ ice_aqc_opc_lldp_set_local_mib = 0x0A08,
+ ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
@@ -1497,7 +1665,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
- /* TX queue handling commands/events */
+ /* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 63f003441300..da7878529929 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -31,7 +31,7 @@
* @hw: pointer to the HW structure
*
* This function sets the MAC type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
+ * vendor ID and device ID stored in the HW structure.
*/
static enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
@@ -77,7 +77,7 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
/**
* ice_aq_manage_mac_read - manage MAC address read command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the manage MAC read response
* @buf_size: Size of the virtual buffer
* @cd: pointer to command details structure or NULL
@@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-static enum ice_status
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return status;
+ return 0;
}
/**
@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
*/
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_FIN, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+ ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+ ICE_FLG_FIN, idx++);
/* flex flag 1 is not used for flexi-flag programming, skipping
* these four FLG64 bits.
*/
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
- ICE_RXFLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
- ICE_RXFLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+ ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+ ICE_FLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+ ICE_FLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
break;
default:
@@ -418,7 +418,7 @@ static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -438,7 +438,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
@@ -477,7 +477,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
/**
* ice_cfg_fw_log - configure FW logging
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @enable: enable certain FW logging events if true, disable all if false
*
* This function enables/disables the FW logging via Rx CQ events and a UART
@@ -626,7 +626,7 @@ out:
/**
* ice_output_fw_log
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: pointer to the AQ message descriptor
* @buf: pointer to the buffer accompanying the AQ message
*
@@ -642,12 +642,12 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
/**
* ice_get_itr_intrl_gran - determine int/intrl granularity
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Determines the itr/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
-static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
+static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
@@ -664,13 +664,7 @@ static enum ice_status ice_get_itr_intrl_gran(struct ice_hw *hw)
hw->itr_gran = ICE_ITR_GRAN_MAX_25;
hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
break;
- default:
- ice_debug(hw, ICE_DBG_INIT,
- "Failed to determine itr/intrl granularity\n");
- return ICE_ERR_CFG;
}
-
- return 0;
}
/**
@@ -697,9 +691,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
return status;
- status = ice_get_itr_intrl_gran(hw);
- if (status)
- return status;
+ ice_get_itr_intrl_gran(hw);
status = ice_init_all_ctrlq(hw);
if (status)
@@ -731,7 +723,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
- /* set the back pointer to hw */
+ /* set the back pointer to HW */
hw->port_info->hw = hw;
/* Initialize port_info struct with switch configuration data */
@@ -988,7 +980,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the Rx queue
*
- * Copies rxq context from dense structure to hw register space
+ * Copies rxq context from dense structure to HW register space
*/
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
@@ -1001,7 +993,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
if (rxq_index > QRX_CTRL_MAX_INDEX)
return ICE_ERR_PARAM;
- /* Copy each dword separately to hw */
+ /* Copy each dword separately to HW */
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
wr32(hw, QRX_CONTEXT(i, rxq_index),
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
@@ -1045,7 +1037,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
- * it to hw register space
+ * it to HW register space
*/
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
@@ -1100,8 +1092,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Dumps debug log about control command with descriptor contents.
*/
-void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
- void *buf, u16 buf_len)
+void
+ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
+ u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
@@ -1143,7 +1136,7 @@ void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @desc: descriptor describing the command
* @buf: buffer to use for indirect commands (NULL for direct commands)
* @buf_size: size of buffer for indirect commands (0 for direct commands)
@@ -1160,7 +1153,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
/**
* ice_aq_get_fw_ver
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cd: pointer to command details structure or NULL
*
* Get the firmware version (0x0001) from the admin queue commands
@@ -1194,7 +1187,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
/**
* ice_aq_q_shutdown
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
*
* Tell the Firmware that we're shutting down the AdminQ and whether
@@ -1217,8 +1210,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
/**
* ice_aq_req_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @access: access type
* @sdp_number: resource number
* @timeout: the maximum time in ms that the driver may hold the resource
@@ -1303,8 +1296,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/**
* ice_aq_release_res
- * @hw: pointer to the hw struct
- * @res: resource id
+ * @hw: pointer to the HW struct
+ * @res: resource ID
* @sdp_number: resource number
* @cd: pointer to command details structure or NULL
*
@@ -1330,7 +1323,7 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
/**
* ice_acquire_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
* @access: access type (read or write)
* @timeout: timeout in milliseconds
*
@@ -1392,7 +1385,7 @@ ice_acquire_res_exit:
/**
* ice_release_res
* @hw: pointer to the HW structure
- * @res: resource id
+ * @res: resource ID
*
* This function will release a resource using the proper Admin Command.
*/
@@ -1404,7 +1397,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
status = ice_aq_release_res(hw, res, 0, NULL);
/* there are some rare cases when trying to release the resource
- * results in an admin Q timeout, so handle them correctly
+ * results in an admin queue timeout, so handle them correctly
*/
while ((status == ICE_ERR_AQ_TIMEOUT) &&
(total_delay < hw->adminq.sq_cmd_timeout)) {
@@ -1415,13 +1408,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
- * ice_get_guar_num_vsi - determine number of guar VSI for a PF
- * @hw: pointer to the hw structure
+ * ice_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
- * from parsing capabilities and use this to calculate the number of VSI per PF.
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
*/
-static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
u8 funcs;
@@ -1432,12 +1427,12 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
if (!funcs)
return 0;
- return ICE_MAX_VSI / funcs;
+ return max / funcs;
}
/**
* ice_parse_caps - parse function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: pointer to a buffer containing function/device capability records
* @cap_count: number of capability records in the list
* @opc: type of capabilities list to parse
@@ -1512,7 +1507,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
number);
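
The renamed helper just splits a per-device maximum evenly across the physical functions that report as valid, so the VSI case passes ICE_MAX_VSI and any other resource can reuse the same division. A standalone model of the calculation (the bitmap and maximum below are example values, not the driver's):

#include <stdio.h>

static unsigned int num_per_func(unsigned int max, unsigned int funcs_bitmap)
{
        unsigned int funcs = (unsigned int)__builtin_popcount(funcs_bitmap);

        return funcs ? max / funcs : 0;         /* same zero-funcs guard as the driver */
}

int main(void)
{
        /* example: 4 valid functions sharing a maximum of 768 VSIs -> 192 each */
        printf("per-PF share: %u\n", num_per_func(768u, 0x0fu));
        return 0;
}
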
@@ -1578,7 +1574,7 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
/**
* ice_aq_discover_caps - query function/device capabilities
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf: a virtual buffer to hold the capabilities
* @buf_size: Size of the virtual buffer
* @cap_count: cap count needed if AQ err==ENOMEM
@@ -1617,8 +1613,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* @hw: pointer to the hardware structure
* @opc: capabilities type to discover - pass in the command opcode
*/
-static enum ice_status ice_discover_caps(struct ice_hw *hw,
- enum ice_adminq_opc opc)
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
enum ice_status status;
u32 cap_count;
@@ -1677,7 +1673,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
/**
* ice_aq_manage_mac_write - manage MAC address write command
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
* @flags: flags to control write behavior
* @cd: pointer to command details structure or NULL
@@ -1705,7 +1701,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
/**
* ice_aq_clear_pxe_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
@@ -1721,7 +1717,7 @@ static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
/**
* ice_clear_pxe_mode - clear pxe operations mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Make sure all PXE mode settings are cleared, including things
* like descriptor fetch/write-back mode.
@@ -1737,10 +1733,10 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* @phy_type_low: lower part of phy_type
* @phy_type_high: higher part of phy_type
*
- * This helper function will convert an entry in phy type structure
+ * This helper function will convert an entry in PHY type structure
* [phy_type_low, phy_type_high] to its corresponding link speed.
* Note: In the structure of [phy_type_low, phy_type_high], there should
- * be one bit set, as this function will convert one phy type to its
+ * be one bit set, as this function will convert one PHY type to its
* speed.
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
@@ -1884,10 +1880,10 @@ void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap)
{
- u16 speed = ICE_AQ_LINK_SPEED_UNKNOWN;
u64 pt_high;
u64 pt_low;
int index;
+ u16 speed;
/* We first check with low part of phy_type */
for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
@@ -1910,7 +1906,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
/**
* ice_aq_set_phy_cfg
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @lport: logical port number
* @cfg: structure with PHY configuration data to be set
* @cd: pointer to command details structure or NULL
@@ -1929,6 +1925,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
if (!cfg)
return ICE_ERR_PARAM;
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
+ ice_debug(hw, ICE_DBG_PHY,
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ cfg->caps);
+
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
+ }
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2016,7 +2021,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
if (!pcaps)
return ICE_ERR_NO_MEMORY;
- /* Get the current phy config */
+ /* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status) {
@@ -2027,8 +2032,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* clear the old pause settings */
cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
/* set the new capabilities */
cfg.caps |= pause_mask;
+
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
int retry_count, retry_max = 10;
@@ -2136,6 +2143,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
@@ -2297,7 +2330,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
/**
* __ice_aq_get_set_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_id: VSI FW index
* @key: pointer to key info struct
* @set: set true to set the key, false to get the key
@@ -2332,7 +2365,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
/**
* ice_aq_get_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @key: pointer to key info struct
*
@@ -2351,7 +2384,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_aq_set_rss_key
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: software VSI handle
* @keys: pointer to key info struct
*
@@ -2436,7 +2469,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
* @num_qgrps: number of groups in the list
* @qg_list: the list of groups to disable
* @buf_size: the total size of the qg_list buffer in bytes
- * @rst_src: if called due to reset, specifies the RST source
+ * @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
@@ -2476,7 +2509,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
break;
case ICE_VF_RESET:
cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
- /* In this case, FW expects vmvf_num to be absolute VF id */
+ /* In this case, FW expects vmvf_num to be absolute VF ID */
cmd->vmvf_and_timeout |=
cpu_to_le16((vmvf_num + hw->func_caps.vf_base_id) &
ICE_AQC_Q_DIS_VMVF_NUM_M);
@@ -2534,8 +2567,8 @@ do_aq:
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -2573,8 +2606,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -2616,8 +2649,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -2667,8 +2700,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -2750,24 +2783,50 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
/**
+ * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @q_handle: software queue handle
+ */
+static struct ice_q_ctx *
+ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ struct ice_q_ctx *q_ctx;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return NULL;
+ if (q_handle >= vsi->num_lan_q_entries[tc])
+ return NULL;
+ if (!vsi->lan_q_ctx[tc])
+ return NULL;
+ q_ctx = vsi->lan_q_ctx[tc];
+ return &q_ctx[q_handle];
+}
+
+/**
* ice_ena_vsi_txq
* @pi: port information structure
* @vsi_handle: software VSI handle
- * @tc: tc number
+ * @tc: TC number
+ * @q_handle: software queue handle
* @num_qgrps: Number of added queue groups
* @buf: list of queue groups to be added
* @buf_size: size of buffer for indirect command
* @cd: pointer to command details structure or NULL
*
- * This function adds one lan q
+ * This function adds one LAN queue
*/
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
+ struct ice_q_ctx *q_ctx;
enum ice_status status;
struct ice_hw *hw;
@@ -2784,6 +2843,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
mutex_lock(&pi->sched_lock);
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
+ if (!q_ctx) {
+ ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
+ q_handle);
+ status = ICE_ERR_PARAM;
+ goto ena_txq_exit;
+ }
+
/* find a parent node */
parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
ICE_SCHED_NODE_OWNER_LAN);
@@ -2803,14 +2870,14 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
* Bit 5-6.
* - Bit 7 is reserved.
* Without setting the generic section as valid in valid_sections, the
- * Admin Q command will fail with error code ICE_AQ_RC_EINVAL.
+ * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
- /* add the lan q */
+ /* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
if (status) {
- ice_debug(hw, ICE_DBG_SCHED, "enable Q %d failed %d\n",
+ ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
le16_to_cpu(buf->txqs[0].txq_id),
hw->adminq.sq_last_status);
goto ena_txq_exit;
@@ -2818,8 +2885,9 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ q_ctx->q_handle = q_handle;
- /* add a leaf node into schduler tree q layer */
+ /* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
ena_txq_exit:
@@ -2830,35 +2898,43 @@ ena_txq_exit:
/**
* ice_dis_vsi_txq
* @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
* @num_queues: number of queues
+ * @q_handles: pointer to software queue handle array
* @q_ids: pointer to the q_id array
* @q_teids: pointer to queue node teids
- * @rst_src: if called due to reset, specifies the RST source
+ * @rst_src: if called due to reset, specifies the reset source
* @vmvf_num: the relative VM or VF number that is undergoing the reset
* @cd: pointer to command details structure or NULL
*
* This function removes queues and their corresponding nodes in SW DB
*/
enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handles, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item qg_list;
+ struct ice_q_ctx *q_ctx;
u16 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
- /* if queue is disabled already yet the disable queue command has to be
- * sent to complete the VF reset, then call ice_aq_dis_lan_txq without
- * any queue information
- */
- if (!num_queues && rst_src)
- return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src, vmvf_num,
- NULL);
+ if (!num_queues) {
+ /* if queue is disabled already yet the disable queue command
+ * has to be sent to complete the VF reset, then call
+ * ice_aq_dis_lan_txq without any queue information
+ */
+ if (rst_src)
+ return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
+ vmvf_num, NULL);
+ return ICE_ERR_CFG;
+ }
mutex_lock(&pi->sched_lock);
@@ -2868,6 +2944,17 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
if (!node)
continue;
+ q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
+ if (!q_ctx) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
+ q_handles[i]);
+ continue;
+ }
+ if (q_ctx->q_handle != q_handles[i]) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
+ q_ctx->q_handle, q_handles[i]);
+ continue;
+ }
qg_list.parent_teid = node->info.parent_teid;
qg_list.num_qs = 1;
qg_list.q_id[0] = cpu_to_le16(q_ids[i]);
@@ -2878,18 +2965,19 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
if (status)
break;
ice_free_sched_node(pi, node);
+ q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
}
mutex_unlock(&pi->sched_lock);
return status;
}
/**
- * ice_cfg_vsi_qs - configure the new/exisiting VSI queues
+ * ice_cfg_vsi_qs - configure the new/existing VSI queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
* @maxqs: max queues array per TC
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
*
* This function adds/updates the VSI queues per TC.
*/
@@ -2908,7 +2996,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
mutex_lock(&pi->sched_lock);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
/* configuration is possible only if TC node is present */
if (!ice_sched_get_tc_node(pi, i))
continue;
@@ -2924,13 +3012,13 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
- * ice_cfg_vsi_lan - configure VSI lan queues
+ * ice_cfg_vsi_lan - configure VSI LAN queues
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_bitmap: TC bitmap
- * @max_lanqs: max lan queues array per TC
+ * @max_lanqs: max LAN queues array per TC
*
- * This function adds/updates the VSI lan queues per TC.
+ * This function adds/updates the VSI LAN queues per TC.
*/
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
@@ -2942,7 +3030,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
/**
* ice_replay_pre_init - replay pre initialization
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
@@ -2966,7 +3054,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
/**
* ice_replay_vsi - replay VSI configuration
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: driver VSI handle
*
* Restore all VSI configuration after reset. It is required to call this
@@ -2993,7 +3081,7 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_replay_post - post replay configuration cleanup
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Post replay cleanup.
*/
@@ -3012,8 +3100,9 @@ void ice_replay_post(struct ice_hw *hw)
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
u64 new_data;
@@ -3043,8 +3132,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
u32 new_data;
@@ -3063,3 +3153,28 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
+
+/**
+ * ice_sched_query_elem - query element information from HW
+ * @hw: pointer to the HW struct
+ * @node_teid: node TEID to be queried
+ * @buf: buffer to element information
+ *
+ * This function queries HW element information
+ */
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf)
+{
+ u16 buf_size, num_elem_ret = 0;
+ enum ice_status status;
+
+ buf_size = sizeof(*buf);
+ memset(buf, 0, buf_size);
+ buf->generic[0].node_teid = cpu_to_le32(node_teid);
+ status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
+ NULL);
+ if (status || num_elem_ret != 1)
+ ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
+ return status;
+}
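
The ice_stat_update32()/ice_stat_update40() helpers above read free-running hardware counters that wrap (apparently 32-bit and 40-bit wide, going by the names), so when the new reading is smaller than the previous one the delta is fixed up by adding 2^32 back before subtracting, exactly as the roll-over comment says; the 40-bit variant presumably does the analogous thing. A quick numeric check of the 32-bit case:

#include <inttypes.h>
#include <stdio.h>

/* Delta of a free-running 32-bit counter, tolerating one wrap-around. */
static uint64_t delta32(uint64_t prev, uint32_t now)
{
        if (now >= prev)
                return now - prev;
        return (now + (1ULL << 32)) - prev;     /* same fix-up as ice_stat_update32 */
}

int main(void)
{
        /* counter wrapped: 0xFFFFFFF0 -> 0x10 is really an increase of 0x20 */
        printf("delta = 0x%" PRIx64 "\n", delta32(0xFFFFFFF0ULL, 0x10u));
        return 0;
}
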
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d7c7c2ed8823..f1ddebf45231 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -9,8 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
- u16 buf_len);
+void
+ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
- u16 *data);
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -89,25 +89,37 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
enum ice_status
-ice_dis_vsi_txq(struct ice_port_info *pi, u8 num_queues, u16 *q_ids,
- u32 *q_teids, enum ice_disq_rst_src rst_src, u16 vmvf_num,
- struct ice_sq_cd *cmd_details);
+ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
+ u16 *q_handle, u16 *q_ids, u32 *q_teids,
+ enum ice_disq_rst_src rst_src, u16 vmvf_num,
+ struct ice_sq_cd *cd);
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
u16 *max_lanqs);
enum ice_status
-ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
- struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
+ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
+ u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
+enum ice_status
+ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
+ struct ice_aqc_get_elem *buf);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 2bf5e11f559a..cc8cb5fdcdc1 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -51,7 +51,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
/**
* ice_check_sq_alive
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if Queue is enabled else false.
@@ -287,7 +287,7 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*
- * Configure base address and length registers for the receive (event q)
+ * Configure base address and length registers for the receive (event queue)
*/
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
@@ -751,7 +751,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
*
* Returns true if the firmware has processed all descriptors on the
@@ -767,7 +767,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/**
* ice_sq_send_cmd - send command to Control Queue (ATQ)
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem)
* @buf: buffer to use for indirect commands (or NULL for direct commands)
@@ -962,7 +962,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
/**
* ice_clean_rq_elem
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
* @e: event info from the receive descriptor, includes any buffers
* @pending: number of events that could be left to process
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 0038a4109c99..e0585394d984 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -79,6 +79,7 @@ struct ice_rq_event_info {
/* Control Queue information */
struct ice_ctl_q_info {
enum ice_ctl_q qtype;
+ enum ice_aq_err rq_last_status; /* last status on receive queue */
struct ice_ctl_q_ring rq; /* receive queue */
struct ice_ctl_q_ring sq; /* send queue */
u32 sq_cmd_timeout; /* send queue cmd write back timeout */
@@ -86,10 +87,9 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
+ enum ice_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
- enum ice_aq_err sq_last_status; /* last status on send queue */
- enum ice_aq_err rq_last_status; /* last status on receive queue */
};
#endif /* _ICE_CONTROLQ_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
new file mode 100644
index 000000000000..8bbf48e04a1c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -0,0 +1,1392 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_sched.h"
+#include "ice_dcb.h"
+
+/**
+ * ice_aq_get_lldp_mib
+ * @hw: pointer to the HW struct
+ * @bridge_type: type of bridge requested
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @local_len: length of the returned Local LLDP MIB
+ * @remote_len: length of the returned Remote LLDP MIB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Requests the complete LLDP MIB (entire packet). (0x0A00)
+ */
+static enum ice_status
+ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
+ u16 buf_size, u16 *local_len, u16 *remote_len,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_get_mib *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.lldp_get_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_get_mib);
+
+ cmd->type = mib_type & ICE_AQ_LLDP_MIB_TYPE_M;
+ cmd->type |= (bridge_type << ICE_AQ_LLDP_BRID_TYPE_S) &
+ ICE_AQ_LLDP_BRID_TYPE_M;
+
+ desc.datalen = cpu_to_le16(buf_size);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (!status) {
+ if (local_len)
+ *local_len = le16_to_cpu(cmd->local_len);
+ if (remote_len)
+ *remote_len = le16_to_cpu(cmd->remote_len);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_cfg_lldp_mib_change
+ * @hw: pointer to the HW struct
+ * @ena_update: Enable or Disable event posting
+ * @cd: pointer to command details structure or NULL
+ *
+ * Enable or Disable posting of an event on ARQ when LLDP MIB
+ * associated with the interface changes (0x0A01)
+ */
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_mib_change *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_event;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
+
+ if (!ena_update)
+ cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_stop_lldp
+ * @hw: pointer to the HW struct
+ * @shutdown_lldp_agent: True if LLDP Agent needs to be Shutdown
+ * False if LLDP Agent needs to be Stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Stop or Shutdown the embedded LLDP Agent (0x0A05)
+ */
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_stop;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
+
+ if (shutdown_lldp_agent)
+ cmd->command |= ICE_AQ_LLDP_AGENT_SHUTDOWN;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_start_lldp
+ * @hw: pointer to the HW struct
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start the embedded LLDP Agent on all ports. (0x0A06)
+ */
+enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_start *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_start;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
+
+ cmd->command = ICE_AQ_LLDP_AGENT_START;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the HW struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buf: pointer to the caller-supplied buffer to store the MIB block
+ * @buf_size: size of the buffer (in bytes)
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB. (0x0A08)
+ */
+static enum ice_status
+ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_set_local_mib *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_set_mib;
+
+ if (buf_size == 0 || !buf)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
+
+ desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.datalen = cpu_to_le16(buf_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buf_size);
+
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+}
+
+/**
+ * ice_get_dcbx_status
+ * @hw: pointer to the HW struct
+ *
+ * Get the DCBX status from the Firmware
+ */
+u8 ice_get_dcbx_status(struct ice_hw *hw)
+{
+ u32 reg;
+
+ reg = rd32(hw, PRTDCB_GENS);
+ return (u8)((reg & PRTDCB_GENS_DCBX_STATUS_M) >>
+ PRTDCB_GENS_DCBX_STATUS_S);
+}
+
+/**
+ * ice_parse_ieee_ets_common_tlv
+ * @buf: Data buffer to be parsed for ETS CFG/REC data
+ * @ets_cfg: Container to store parsed data
+ *
+ * Parses the common data of IEEE 802.1Qaz ETS CFG/REC TLV
+ */
+static void
+ice_parse_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ ets_cfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_1_M) >>
+ ICE_IEEE_ETS_PRIO_1_S);
+ ets_cfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_IEEE_ETS_PRIO_0_M) >>
+ ICE_IEEE_ETS_PRIO_0_S);
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 9 | 10| 11| 12| 13| 14| 15| 16|
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ ets_cfg->tcbwtable[i] = buf[offset];
+ ets_cfg->tsatable[i] = buf[ICE_MAX_TRAFFIC_CLASS + offset++];
+ }
+}
+
+/**
+ * ice_parse_ieee_etscfg_tlv
+ * @tlv: IEEE 802.1Qaz ETS CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_parse_ieee_etscfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ etscfg->willing = ((buf[0] & ICE_IEEE_ETS_WILLING_M) >>
+ ICE_IEEE_ETS_WILLING_S);
+ etscfg->cbs = ((buf[0] & ICE_IEEE_ETS_CBS_M) >> ICE_IEEE_ETS_CBS_S);
+ etscfg->maxtcs = ((buf[0] & ICE_IEEE_ETS_MAXTC_M) >>
+ ICE_IEEE_ETS_MAXTC_S);
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_parse_ieee_etsrec_tlv
+ * @tlv: IEEE 802.1Qaz ETS REC TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Parses IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_parse_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* Begin parsing at Priority Assignment Table (offset 1 in buf) */
+ ice_parse_ieee_ets_common_tlv(&buf[1], &dcbcfg->etsrec);
+}
+
+/**
+ * ice_parse_ieee_pfccfg_tlv
+ * @tlv: IEEE 802.1Qaz PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_parse_ieee_pfccfg_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ dcbcfg->pfc.willing = ((buf[0] & ICE_IEEE_PFC_WILLING_M) >>
+ ICE_IEEE_PFC_WILLING_S);
+ dcbcfg->pfc.mbc = ((buf[0] & ICE_IEEE_PFC_MBC_M) >> ICE_IEEE_PFC_MBC_S);
+ dcbcfg->pfc.pfccap = ((buf[0] & ICE_IEEE_PFC_CAP_M) >>
+ ICE_IEEE_PFC_CAP_S);
+ dcbcfg->pfc.pfcena = buf[1];
+}
+
+/**
+ * ice_parse_ieee_app_tlv
+ * @tlv: IEEE 802.1Qaz APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses IEEE 802.1Qaz APP PRIO TLV
+ */
+static void
+ice_parse_ieee_app_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 offset = 0;
+ u16 typelen;
+ int i = 0;
+ u16 len;
+ u8 *buf;
+
+ typelen = ntohs(tlv->typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ buf = tlv->tlvinfo;
+
+	/* Remove sizeof(ouisubtype) and the reserved byte from len.
+	 * The remaining len divided by 3 is the number of APP priority entries.
+	 */
+ len -= (sizeof(tlv->ouisubtype) + 1);
+
+ /* Move offset to App Priority Table */
+ offset++;
+
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (offset < len) {
+ dcbcfg->app[i].priority = ((buf[offset] &
+ ICE_IEEE_APP_PRIO_M) >>
+ ICE_IEEE_APP_PRIO_S);
+ dcbcfg->app[i].selector = ((buf[offset] &
+ ICE_IEEE_APP_SEL_M) >>
+ ICE_IEEE_APP_SEL_S);
+ dcbcfg->app[i].prot_id = (buf[offset + 1] << 0x8) |
+ buf[offset + 2];
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+
+ dcbcfg->numapps = i;
+}
+
+/**
+ * ice_parse_ieee_tlv
+ * @tlv: IEEE 802.1Qaz TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_ieee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u8 subtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ switch (subtype) {
+ case ICE_IEEE_SUBTYPE_ETS_CFG:
+ ice_parse_ieee_etscfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_ETS_REC:
+ ice_parse_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_PFC_CFG:
+ ice_parse_ieee_pfccfg_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_SUBTYPE_APP_PRI:
+ ice_parse_ieee_app_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_parse_cee_pgcfg_tlv
+ * @tlv: CEE DCBX PG CFG TLV
+ * @dcbcfg: Local store to update ETS CFG data
+ *
+ * Parses CEE DCBX PG CFG TLV
+ */
+static void
+ice_parse_cee_pgcfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u16 offset = 0;
+ int i;
+
+ etscfg = &dcbcfg->etscfg;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ etscfg->willing = 1;
+
+ etscfg->cbs = 0;
+ /* Priority Group Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ etscfg->prio_table[i * 2] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ etscfg->prio_table[i * 2 + 1] =
+ ((buf[offset] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ offset++;
+ }
+
+ /* PG Percentage Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i)
+ etscfg->tcbwtable[i] = buf[offset++];
+
+ /* Number of TCs supported (1 octet) */
+ etscfg->maxtcs = buf[offset];
+}
+
+/**
+ * ice_parse_cee_pfccfg_tlv
+ * @tlv: CEE DCBX PFC CFG TLV
+ * @dcbcfg: Local store to update PFC CFG data
+ *
+ * Parses CEE DCBX PFC CFG TLV
+ */
+static void
+ice_parse_cee_pfccfg_tlv(struct ice_cee_feat_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+
+ if (tlv->en_will_err & ICE_CEE_FEAT_TLV_WILLING_M)
+ dcbcfg->pfc.willing = 1;
+
+ /* ------------------------
+ * | PFC Enable | PFC TCs |
+ * ------------------------
+ * | 1 octet | 1 octet |
+ */
+ dcbcfg->pfc.pfcena = buf[0];
+ dcbcfg->pfc.pfccap = buf[1];
+}
+
+/**
+ * ice_parse_cee_app_tlv
+ * @tlv: CEE DCBX APP TLV
+ * @dcbcfg: Local store to update APP PRIO data
+ *
+ * Parses CEE DCBX APP PRIO TLV
+ */
+static void
+ice_parse_cee_app_tlv(struct ice_cee_feat_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, typelen, offset = 0;
+ struct ice_cee_app_prio *app;
+ u8 i;
+
+ typelen = ntohs(tlv->hdr.typelen);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+
+ dcbcfg->numapps = len / sizeof(*app);
+ if (!dcbcfg->numapps)
+ return;
+ if (dcbcfg->numapps > ICE_DCBX_MAX_APPS)
+ dcbcfg->numapps = ICE_DCBX_MAX_APPS;
+
+ for (i = 0; i < dcbcfg->numapps; i++) {
+ u8 up, selector;
+
+ app = (struct ice_cee_app_prio *)(tlv->tlvinfo + offset);
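+		/* Use the lowest user priority set in the priority bitmap */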
+ for (up = 0; up < ICE_MAX_USER_PRIORITY; up++)
+ if (app->prio_map & BIT(up))
+ break;
+
+ dcbcfg->app[i].priority = up;
+
+ /* Get Selector from lower 2 bits, and convert to IEEE */
+ selector = (app->upper_oui_sel & ICE_CEE_APP_SELECTOR_M);
+ switch (selector) {
+ case ICE_CEE_APP_SEL_ETHTYPE:
+ dcbcfg->app[i].selector = ICE_APP_SEL_ETHTYPE;
+ break;
+ case ICE_CEE_APP_SEL_TCPIP:
+ dcbcfg->app[i].selector = ICE_APP_SEL_TCPIP;
+ break;
+ default:
+ /* Keep selector as it is for unknown types */
+ dcbcfg->app[i].selector = selector;
+ }
+
+ dcbcfg->app[i].prot_id = ntohs(app->protocol);
+ /* Move to next app */
+ offset += sizeof(*app);
+ }
+}
+
+/**
+ * ice_parse_cee_tlv
+ * @tlv: CEE DCBX TLV
+ * @dcbcfg: Local store to update DCBX config data
+ *
+ * Get the TLV subtype and send it to parsing function
+ * based on the subtype value
+ */
+static void
+ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_cee_feat_tlv *sub_tlv;
+ u8 subtype, feat_tlv_count = 0;
+ u16 len, tlvlen, typelen;
+ u32 ouisubtype;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ subtype = (u8)((ouisubtype & ICE_LLDP_TLV_SUBTYPE_M) >>
+ ICE_LLDP_TLV_SUBTYPE_S);
+ /* Return if not CEE DCBX */
+ if (subtype != ICE_CEE_DCBX_TYPE)
+ return;
+
+ typelen = ntohs(tlv->typelen);
+ tlvlen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
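+	/* Feature TLVs begin after the org TLV header and the CEE control TLV */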
+ len = sizeof(tlv->typelen) + sizeof(ouisubtype) +
+ sizeof(struct ice_cee_ctrl_tlv);
+ /* Return if no CEE DCBX Feature TLVs */
+ if (tlvlen <= len)
+ return;
+
+ sub_tlv = (struct ice_cee_feat_tlv *)((char *)tlv + len);
+ while (feat_tlv_count < ICE_CEE_MAX_FEAT_TYPE) {
+ u16 sublen;
+
+ typelen = ntohs(sub_tlv->hdr.typelen);
+ sublen = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
+ subtype = (u8)((typelen & ICE_LLDP_TLV_TYPE_M) >>
+ ICE_LLDP_TLV_TYPE_S);
+ switch (subtype) {
+ case ICE_CEE_SUBTYPE_PG_CFG:
+ ice_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_PFC_CFG:
+ ice_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
+ break;
+ case ICE_CEE_SUBTYPE_APP_PRI:
+ ice_parse_cee_app_tlv(sub_tlv, dcbcfg);
+ break;
+ default:
+ return; /* Invalid Sub-type return */
+ }
+ feat_tlv_count++;
+ /* Move to next sub TLV */
+ sub_tlv = (struct ice_cee_feat_tlv *)
+ ((char *)sub_tlv + sizeof(sub_tlv->hdr.typelen) +
+ sublen);
+ }
+}
+
+/**
+ * ice_parse_org_tlv
+ * @tlv: Organization specific TLV
+ * @dcbcfg: Local store to update ETS REC data
+ *
+ * Currently IEEE 802.1Qaz and CEE DCBX TLVs are supported; all other
+ * organization-specific TLVs are ignored
+ */
+static void
+ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 ouisubtype;
+ u32 oui;
+
+ ouisubtype = ntohl(tlv->ouisubtype);
+ oui = ((ouisubtype & ICE_LLDP_TLV_OUI_M) >> ICE_LLDP_TLV_OUI_S);
+ switch (oui) {
+ case ICE_IEEE_8021QAZ_OUI:
+ ice_parse_ieee_tlv(tlv, dcbcfg);
+ break;
+ case ICE_CEE_DCBX_OUI:
+ ice_parse_cee_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_lldp_to_dcb_cfg
+ * @lldpmib: LLDPDU to be parsed
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Parse DCB configuration from the LLDPDU
+ */
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_lldp_org_tlv *tlv;
+ enum ice_status ret = 0;
+ u16 offset = 0;
+ u16 typelen;
+ u16 type;
+ u16 len;
+
+ if (!lldpmib || !dcbcfg)
+ return ICE_ERR_PARAM;
+
+ /* set to the start of LLDPDU */
+ lldpmib += ETH_HLEN;
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ typelen = ntohs(tlv->typelen);
+ type = ((typelen & ICE_LLDP_TLV_TYPE_M) >> ICE_LLDP_TLV_TYPE_S);
+ len = ((typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S);
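+		/* Advance past this TLV's 2-byte type/length header and its value */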
+ offset += sizeof(typelen) + len;
+
+ /* END TLV or beyond LLDPDU size */
+ if (type == ICE_TLV_TYPE_END || offset > ICE_LLDPDU_SIZE)
+ break;
+
+ switch (type) {
+ case ICE_TLV_TYPE_ORG:
+ ice_parse_org_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+
+ /* Move to next TLV */
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @mib_type: MIB type for the query
+ * @bridgetype: bridge type for the query (remote)
+ * @dcbcfg: store for LLDPDU data
+ *
+ * Query DCB configuration from the firmware
+ */
+static enum ice_status
+ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ enum ice_status ret;
+ u8 *lldpmib;
+
+ /* Allocate the LLDPDU */
+ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
+ ICE_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (!ret)
+ /* Parse LLDP MIB to get DCB configuration */
+ ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
+
+ devm_kfree(ice_hw_to_dev(hw), lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW
+ * @hw: pointer to the HW struct
+ * @start_dcbx_agent: True if DCBx Agent needs to be started
+ * False if DCBx Agent needs to be stopped
+ * @dcbx_agent_status: FW indicates back the DCBx agent status
+ * True if DCBx Agent is active
+ * False if DCBx Agent is stopped
+ * @cd: pointer to command details structure or NULL
+ *
+ * Start/Stop the embedded DCBX agent. Even when this wrapper function returns
+ * success, the caller must still check whether the FW reported the requested
+ * state in dcbx_agent_status and react accordingly. (0x0A09)
+ */
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_lldp_stop_start_specific_agent *cmd;
+ enum ice_status status;
+ struct ice_aq_desc desc;
+ u16 opcode;
+
+ cmd = &desc.params.lldp_agent_ctrl;
+
+ opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+
+ if (start_dcbx_agent)
+ cmd->command = ICE_AQC_START_STOP_AGENT_START_DCBX;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+
+ *dcbx_agent_status = false;
+
+ if (!status &&
+ cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
+ *dcbx_agent_status = true;
+
+ return status;
+}
+
+/**
+ * ice_aq_get_cee_dcb_cfg
+ * @hw: pointer to the HW struct
+ * @buff: response buffer that stores CEE operational configuration
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get CEE DCBX mode operational configuration from firmware (0x0A07)
+ */
+static enum ice_status
+ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
+ struct ice_aqc_get_cee_dcb_cfg_resp *buff,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
+
+ return ice_aq_send_cmd(hw, &desc, (void *)buff, sizeof(*buff), cd);
+}
+
+/**
+ * ice_cee_to_dcb_cfg
+ * @cee_cfg: pointer to CEE configuration struct
+ * @dcbcfg: DCB configuration struct
+ *
+ * Convert CEE configuration from firmware to DCB configuration
+ */
+static void
+ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u32 status, tlv_status = le32_to_cpu(cee_cfg->tlv_status);
+ u32 ice_aqc_cee_status_mask, ice_aqc_cee_status_shift;
+ u16 app_prio = le16_to_cpu(cee_cfg->oper_app_prio);
+ u8 i, err, sync, oper, app_index, ice_app_sel_type;
+ u16 ice_aqc_cee_app_mask, ice_aqc_cee_app_shift;
+ u16 ice_app_prot_id_type;
+
+ /* CEE PG data to ETS config */
+ dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
+
+ /* Note that the FW creates the oper_prio_tc nibbles reversed
+ * from those in the CEE Priority Group sub-TLV.
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ dcbcfg->etscfg.prio_table[i * 2] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_0_M) >>
+ ICE_CEE_PGID_PRIO_0_S);
+ dcbcfg->etscfg.prio_table[i * 2 + 1] =
+ ((cee_cfg->oper_prio_tc[i] & ICE_CEE_PGID_PRIO_1_M) >>
+ ICE_CEE_PGID_PRIO_1_S);
+ }
+
+ ice_for_each_traffic_class(i) {
+ dcbcfg->etscfg.tcbwtable[i] = cee_cfg->oper_tc_bw[i];
+
+ if (dcbcfg->etscfg.prio_table[i] == ICE_CEE_PGID_STRICT) {
+ /* Map it to next empty TC */
+ dcbcfg->etscfg.prio_table[i] = cee_cfg->oper_num_tc - 1;
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_STRICT;
+ } else {
+ dcbcfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ }
+ }
+
+	/* CEE PFC data to PFC config */
+ dcbcfg->pfc.pfcena = cee_cfg->oper_pfc_en;
+ dcbcfg->pfc.pfccap = ICE_MAX_TRAFFIC_CLASS;
+
+ app_index = 0;
+ for (i = 0; i < 3; i++) {
+ if (i == 0) {
+ /* FCoE APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FCOE_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FCOE_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FCOE_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FCOE_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FCOE;
+ } else if (i == 1) {
+ /* iSCSI APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_ISCSI_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_ISCSI_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_ISCSI_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_ISCSI_S;
+ ice_app_sel_type = ICE_APP_SEL_TCPIP;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_ISCSI;
+ } else {
+ /* FIP APP */
+ ice_aqc_cee_status_mask = ICE_AQC_CEE_FIP_STATUS_M;
+ ice_aqc_cee_status_shift = ICE_AQC_CEE_FIP_STATUS_S;
+ ice_aqc_cee_app_mask = ICE_AQC_CEE_APP_FIP_M;
+ ice_aqc_cee_app_shift = ICE_AQC_CEE_APP_FIP_S;
+ ice_app_sel_type = ICE_APP_SEL_ETHTYPE;
+ ice_app_prot_id_type = ICE_APP_PROT_ID_FIP;
+ }
+
+ status = (tlv_status & ice_aqc_cee_status_mask) >>
+ ice_aqc_cee_status_shift;
+ err = (status & ICE_TLV_STATUS_ERR) ? 1 : 0;
+ sync = (status & ICE_TLV_STATUS_SYNC) ? 1 : 0;
+ oper = (status & ICE_TLV_STATUS_OPER) ? 1 : 0;
+ /* Add FCoE/iSCSI/FIP APP if Error is False and
+ * Oper/Sync is True
+ */
+ if (!err && sync && oper) {
+ dcbcfg->app[app_index].priority =
+ (app_prio & ice_aqc_cee_app_mask) >>
+ ice_aqc_cee_app_shift;
+ dcbcfg->app[app_index].selector = ice_app_sel_type;
+ dcbcfg->app[app_index].prot_id = ice_app_prot_id_type;
+ app_index++;
+ }
+ }
+
+ dcbcfg->numapps = app_index;
+}
+
+/**
+ * ice_get_ieee_or_cee_dcb_cfg
+ * @pi: port information structure
+ * @dcbx_mode: mode of DCBX (IEEE or CEE)
+ *
+ * Get IEEE or CEE mode DCB configuration from the Firmware
+ */
+static enum ice_status
+ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = NULL;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ if (dcbx_mode == ICE_DCBX_MODE_IEEE)
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ else if (dcbx_mode == ICE_DCBX_MODE_CEE)
+ dcbx_cfg = &pi->desired_dcbx_cfg;
+
+ /* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
+ * or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
+ */
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_LOCAL,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ if (ret)
+ goto out;
+
+ /* Get Remote DCB Config */
+ dcbx_cfg = &pi->remote_dcbx_cfg;
+ ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
+ /* Don't treat ENOENT as an error for Remote MIBs */
+ if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ ret = 0;
+
+out:
+ return ret;
+}
+
+/**
+ * ice_get_dcb_cfg
+ * @pi: port information structure
+ *
+ * Get DCB configuration from the Firmware
+ */
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+{
+ struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ struct ice_dcbx_cfg *dcbx_cfg;
+ enum ice_status ret;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
+ if (!ret) {
+ /* CEE mode */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
+ dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
+ ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
+ } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+		/* CEE mode not enabled; try querying IEEE data */
+ dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_init_dcb
+ * @hw: pointer to the HW struct
+ *
+ * Update DCB configuration from the Firmware
+ */
+enum ice_status ice_init_dcb(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status ret = 0;
+
+ if (!hw->func_caps.common_cap.dcb)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ pi->is_sw_lldp = true;
+
+ /* Get DCBX status */
+ pi->dcbx_status = ice_get_dcbx_status(hw);
+
+ if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS) {
+ /* Get current DCBX configuration */
+ ret = ice_get_dcb_cfg(pi);
+ pi->is_sw_lldp = (hw->adminq.sq_last_status == ICE_AQ_RC_EPERM);
+ if (ret)
+ return ret;
+ } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ return ICE_ERR_NOT_READY;
+ }
+
+ /* Configure the LLDP MIB change event */
+ ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
+ if (!ret)
+ pi->is_sw_lldp = false;
+
+ return ret;
+}
+
+/**
+ * ice_add_ieee_ets_common_tlv
+ * @buf: Data buffer to be populated with ice_dcb_ets_cfg data
+ * @ets_cfg: Container for ice_dcb_ets_cfg data
+ *
+ * Populate the TLV buffer with ice_dcb_ets_cfg data
+ */
+static void
+ice_add_ieee_ets_common_tlv(u8 *buf, struct ice_dcb_ets_cfg *ets_cfg)
+{
+ u8 priority0, priority1;
+ u8 offset = 0;
+ int i;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS / 2; i++) {
+ priority0 = ets_cfg->prio_table[i * 2] & 0xF;
+ priority1 = ets_cfg->prio_table[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << ICE_IEEE_ETS_PRIO_1_S) | priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ *
+ * TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ ice_for_each_traffic_class(i) {
+ buf[offset] = ets_cfg->tcbwtable[i];
+ buf[ICE_MAX_TRAFFIC_CLASS + offset] = ets_cfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ */
+static void
+ice_add_ieee_ets_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u8 maxtcwilling = 0;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(ICE_IEEE_ETS_WILLING_S);
+ maxtcwilling |= etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+ buf[0] = maxtcwilling;
+
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etscfg);
+}
+
+/**
+ * ice_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ */
+static void
+ice_add_ieee_etsrec_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etsrec;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_ETS_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+
+ /* First Octet is reserved */
+ /* Begin adding at Priority Assignment Table (offset 1 in buf) */
+ ice_add_ieee_ets_common_tlv(&buf[1], etsrec);
+}
+
+/**
+ * ice_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ */
+static void
+ice_add_ieee_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_IEEE_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(ICE_IEEE_PFC_WILLING_S);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(ICE_IEEE_PFC_MBC_S);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena;
+}
+
+/**
+ * ice_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store which holds the APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ */
+static void
+ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 typelen, len, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+	/* No APP TLVs, just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << ICE_IEEE_APP_PRIO_S) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].prot_id >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].prot_id & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= ICE_DCBX_MAX_APPS)
+ break;
+ }
+ /* len includes size of ouisubtype + 1 reserved + 3*numapps */
+ len = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) | (len & 0x1FF));
+ tlv->typelen = htons(typelen);
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: Fill TLV data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ * @tlvid: Type of IEEE TLV
+ *
+ * Add TLV information
+ */
+static void
+ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_dcb_cfg_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the LLDP MIB buffer to populate
+ * @miblen: length of LLDP MIB
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Convert the DCB configuration to MIB format
+ */
+static void
+ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
+{
+ u16 len, offset = 0, tlvid = ICE_TLV_ID_START;
+ struct ice_lldp_org_tlv *tlv;
+ u16 typelen;
+
+ tlv = (struct ice_lldp_org_tlv *)lldpmib;
+ while (1) {
+ ice_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelen = ntohs(tlv->typelen);
+ len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
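+		/* +2 accounts for the 2-byte TLV type/length header */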
+ if (len)
+ offset += len + 2;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= ICE_TLV_ID_END_OF_LLDPPDU ||
+ offset > ICE_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (len)
+ tlv = (struct ice_lldp_org_tlv *)
+ ((char *)tlv + sizeof(tlv->typelen) + len);
+ }
+ *miblen = offset;
+}
+
+/**
+ * ice_set_dcb_cfg - Set the local LLDP MIB to FW
+ * @pi: port information structure
+ *
+ * Set DCB configuration to the Firmware
+ */
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+{
+ u8 mib_type, *lldpmib = NULL;
+ struct ice_dcbx_cfg *dcbcfg;
+ enum ice_status ret;
+ struct ice_hw *hw;
+ u16 miblen;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+
+ hw = pi->hw;
+
+ /* update the HW local config */
+ dcbcfg = &pi->local_dcbx_cfg;
+ /* Allocate the LLDPDU */
+ lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
+ if (!lldpmib)
+ return ICE_ERR_NO_MEMORY;
+
+ mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == ICE_DCBX_APPS_NON_WILLING)
+ mib_type |= SET_LOCAL_MIB_TYPE_CEE_NON_WILLING;
+
+ ice_dcb_cfg_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen,
+ NULL);
+
+ devm_kfree(ice_hw_to_dev(hw), lldpmib);
+
+ return ret;
+}
+
+/**
+ * ice_aq_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query the current port ETS configuration
+ */
+static enum ice_status
+ice_aq_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_query_port_ets *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ cmd = &desc.params.port_ets;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
+ cmd->port_teid = pi->root->info.node_teid;
+
+ status = ice_aq_send_cmd(pi->hw, &desc, buf, buf_size, cd);
+ return status;
+}
+
+/**
+ * ice_update_port_tc_tree_cfg - update TC tree configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ *
+ * update the SW DB with the new TC changes
+ */
+static enum ice_status
+ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf)
+{
+ struct ice_sched_node *node, *tc_node;
+ struct ice_aqc_get_elem elem;
+ enum ice_status status = 0;
+ u32 teid1, teid2;
+ u8 i, j;
+
+ if (!pi)
+ return ICE_ERR_PARAM;
+ /* suspend the missing TC nodes */
+ for (i = 0; i < pi->root->num_children; i++) {
+ teid1 = le32_to_cpu(pi->root->children[i]->info.node_teid);
+ ice_for_each_traffic_class(j) {
+ teid2 = le32_to_cpu(buf->tc_node_teid[j]);
+ if (teid1 == teid2)
+ break;
+ }
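+		/* a matching TEID means this TC node is still present in FW */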
+ if (j < ICE_MAX_TRAFFIC_CLASS)
+ continue;
+ /* TC is missing */
+ pi->root->children[i]->in_use = false;
+ }
+ /* add the new TC nodes */
+ ice_for_each_traffic_class(j) {
+ teid2 = le32_to_cpu(buf->tc_node_teid[j]);
+ if (teid2 == ICE_INVAL_TEID)
+ continue;
+		/* Is it already present in the tree? */
+ for (i = 0; i < pi->root->num_children; i++) {
+ tc_node = pi->root->children[i];
+ if (!tc_node)
+ continue;
+ teid1 = le32_to_cpu(tc_node->info.node_teid);
+ if (teid1 == teid2) {
+ tc_node->tc_num = j;
+ tc_node->in_use = true;
+ break;
+ }
+ }
+ if (i < pi->root->num_children)
+ continue;
+ /* new TC */
+ status = ice_sched_query_elem(pi->hw, teid2, &elem);
+ if (!status)
+ status = ice_sched_add_node(pi, 1, &elem.generic[0]);
+ if (status)
+ break;
+ /* update the TC number */
+ node = ice_sched_find_node_by_teid(pi->root, teid2);
+ if (node)
+ node->tc_num = j;
+ }
+ return status;
+}
+
+/**
+ * ice_query_port_ets - query port ETS configuration
+ * @pi: port information structure
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @cd: pointer to command details structure or NULL
+ *
+ * Query the current port ETS configuration and update the
+ * SW DB with the TC changes
+ */
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cd)
+{
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
+ if (!status)
+ status = ice_update_port_tc_tree_cfg(pi, buf);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
new file mode 100644
index 000000000000..e7d4416e3a66
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_H_
+#define _ICE_DCB_H_
+
+#include "ice_type.h"
+
+#define ICE_DCBX_STATUS_NOT_STARTED 0
+#define ICE_DCBX_STATUS_IN_PROGRESS 1
+#define ICE_DCBX_STATUS_DONE 2
+#define ICE_DCBX_STATUS_DIS 7
+
+#define ICE_TLV_TYPE_END 0
+#define ICE_TLV_TYPE_ORG 127
+
+#define ICE_IEEE_8021QAZ_OUI 0x0080C2
+#define ICE_IEEE_SUBTYPE_ETS_CFG 9
+#define ICE_IEEE_SUBTYPE_ETS_REC 10
+#define ICE_IEEE_SUBTYPE_PFC_CFG 11
+#define ICE_IEEE_SUBTYPE_APP_PRI 12
+
+#define ICE_CEE_DCBX_OUI 0x001B21
+#define ICE_CEE_DCBX_TYPE 2
+#define ICE_CEE_SUBTYPE_PG_CFG 2
+#define ICE_CEE_SUBTYPE_PFC_CFG 3
+#define ICE_CEE_SUBTYPE_APP_PRI 4
+#define ICE_CEE_MAX_FEAT_TYPE 3
+/* Defines for LLDP TLV header */
+#define ICE_LLDP_TLV_LEN_S 0
+#define ICE_LLDP_TLV_LEN_M (0x01FF << ICE_LLDP_TLV_LEN_S)
+#define ICE_LLDP_TLV_TYPE_S 9
+#define ICE_LLDP_TLV_TYPE_M (0x7F << ICE_LLDP_TLV_TYPE_S)
+#define ICE_LLDP_TLV_SUBTYPE_S 0
+#define ICE_LLDP_TLV_SUBTYPE_M (0xFF << ICE_LLDP_TLV_SUBTYPE_S)
+#define ICE_LLDP_TLV_OUI_S 8
+#define ICE_LLDP_TLV_OUI_M (0xFFFFFFUL << ICE_LLDP_TLV_OUI_S)
+
+/* Defines for IEEE ETS TLV */
+#define ICE_IEEE_ETS_MAXTC_S 0
+#define ICE_IEEE_ETS_MAXTC_M (0x7 << ICE_IEEE_ETS_MAXTC_S)
+#define ICE_IEEE_ETS_CBS_S 6
+#define ICE_IEEE_ETS_CBS_M BIT(ICE_IEEE_ETS_CBS_S)
+#define ICE_IEEE_ETS_WILLING_S 7
+#define ICE_IEEE_ETS_WILLING_M BIT(ICE_IEEE_ETS_WILLING_S)
+#define ICE_IEEE_ETS_PRIO_0_S 0
+#define ICE_IEEE_ETS_PRIO_0_M (0x7 << ICE_IEEE_ETS_PRIO_0_S)
+#define ICE_IEEE_ETS_PRIO_1_S 4
+#define ICE_IEEE_ETS_PRIO_1_M (0x7 << ICE_IEEE_ETS_PRIO_1_S)
+#define ICE_CEE_PGID_PRIO_0_S 0
+#define ICE_CEE_PGID_PRIO_0_M (0xF << ICE_CEE_PGID_PRIO_0_S)
+#define ICE_CEE_PGID_PRIO_1_S 4
+#define ICE_CEE_PGID_PRIO_1_M (0xF << ICE_CEE_PGID_PRIO_1_S)
+#define ICE_CEE_PGID_STRICT 15
+
+/* Defines for IEEE TSA types */
+#define ICE_IEEE_TSA_STRICT 0
+#define ICE_IEEE_TSA_ETS 2
+
+/* Defines for IEEE PFC TLV */
+#define ICE_IEEE_PFC_CAP_S 0
+#define ICE_IEEE_PFC_CAP_M (0xF << ICE_IEEE_PFC_CAP_S)
+#define ICE_IEEE_PFC_MBC_S 6
+#define ICE_IEEE_PFC_MBC_M BIT(ICE_IEEE_PFC_MBC_S)
+#define ICE_IEEE_PFC_WILLING_S 7
+#define ICE_IEEE_PFC_WILLING_M BIT(ICE_IEEE_PFC_WILLING_S)
+
+/* Defines for IEEE APP TLV */
+#define ICE_IEEE_APP_SEL_S 0
+#define ICE_IEEE_APP_SEL_M (0x7 << ICE_IEEE_APP_SEL_S)
+#define ICE_IEEE_APP_PRIO_S 5
+#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
+
+/* TLV definitions for preparing MIB */
+#define ICE_IEEE_TLV_ID_ETS_CFG 3
+#define ICE_IEEE_TLV_ID_ETS_REC 4
+#define ICE_IEEE_TLV_ID_PFC_CFG 5
+#define ICE_IEEE_TLV_ID_APP_PRI 6
+#define ICE_TLV_ID_END_OF_LLDPPDU 7
+#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+
+#define ICE_IEEE_ETS_TLV_LEN 25
+#define ICE_IEEE_PFC_TLV_LEN 6
+#define ICE_IEEE_APP_TLV_LEN 11
+
+/* IEEE 802.1AB LLDP Organization specific TLV */
+struct ice_lldp_org_tlv {
+ __be16 typelen;
+ __be32 ouisubtype;
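+	/* variable-length TLV value follows; [1] stands in for a flexible array */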
+ u8 tlvinfo[1];
+} __packed;
+
+struct ice_cee_tlv_hdr {
+ __be16 typelen;
+ u8 operver;
+ u8 maxver;
+};
+
+struct ice_cee_ctrl_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ __be32 seqno;
+ __be32 ackno;
+};
+
+struct ice_cee_feat_tlv {
+ struct ice_cee_tlv_hdr hdr;
+ u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define ICE_CEE_FEAT_TLV_ENA_M 0x80
+#define ICE_CEE_FEAT_TLV_WILLING_M 0x40
+#define ICE_CEE_FEAT_TLV_ERR_M 0x20
+ u8 subtype;
+ u8 tlvinfo[1];
+};
+
+struct ice_cee_app_prio {
+ __be16 protocol;
+ u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
+#define ICE_CEE_APP_SELECTOR_M 0x03
+ __be16 lower_oui;
+ u8 prio_map;
+} __packed;
+
+u8 ice_get_dcbx_status(struct ice_hw *hw);
+enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+enum ice_status ice_init_dcb(struct ice_hw *hw);
+enum ice_status
+ice_query_port_ets(struct ice_port_info *pi,
+ struct ice_aqc_port_ets_elem *buf, u16 buf_size,
+ struct ice_sq_cd *cmd_details);
+#ifdef CONFIG_DCB
+enum ice_status
+ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent,
+ struct ice_sq_cd *cd);
+enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
+ bool *dcbx_agent_status, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
+ struct ice_sq_cd *cd);
+#else /* CONFIG_DCB */
+static inline enum ice_status
+ice_aq_stop_lldp(struct ice_hw __always_unused *hw,
+ bool __always_unused shutdown_lldp_agent,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_start_lldp(struct ice_hw __always_unused *hw,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_start_stop_dcbx(struct ice_hw __always_unused *hw,
+ bool __always_unused start_dcbx_agent,
+ bool *dcbx_agent_status,
+ struct ice_sq_cd __always_unused *cd)
+{
+ *dcbx_agent_status = false;
+
+ return 0;
+}
+
+static inline enum ice_status
+ice_aq_cfg_lldp_mib_change(struct ice_hw __always_unused *hw,
+ bool __always_unused ena_update,
+ struct ice_sq_cd __always_unused *cd)
+{
+ return 0;
+}
+
+#endif /* CONFIG_DCB */
+#endif /* _ICE_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
new file mode 100644
index 000000000000..3e81af1884fc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_dcb_lib.h"
+
+/**
+ * ice_dcb_get_ena_tc - return bitmap of enabled TCs
+ * @dcbcfg: DCB config to evaluate for enabled TCs
+ */
+u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 i, num_tc, ena_tc = 1;
+
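+	/* TC0 is always enabled; set a bit for each TC reported by DCBX */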
+ num_tc = ice_dcb_get_num_tc(dcbcfg);
+
+ for (i = 0; i < num_tc; i++)
+ ena_tc |= BIT(i);
+
+ return ena_tc;
+}
+
+/**
+ * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
+ * @dcbcfg: config to retrieve number of TCs from
+ */
+u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
+{
+ bool tc_unused = false;
+ u8 num_tc = 0;
+ u8 ret = 0;
+ int i;
+
+ /* Scan the ETS Config Priority Table to find traffic classes
+ * enabled and create a bitmask of enabled TCs
+ */
+ for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
+
+ /* Scan bitmask for contiguous TCs starting with TC0 */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TCs - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = true;
+ }
+ }
+
+ /* There is always at least 1 TC */
+ if (!ret)
+ ret = 1;
+
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
+ * @vsi: VSI owner of rings being updated
+ */
+void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
+{
+ struct ice_ring *tx_ring, *rx_ring;
+ u16 qoffset, qcount;
+ int i, n;
+
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ /* Reset the TC information */
+ for (i = 0; i < vsi->num_txq; i++) {
+ tx_ring = vsi->tx_rings[i];
+ tx_ring->dcb_tc = 0;
+ }
+ for (i = 0; i < vsi->num_rxq; i++) {
+ rx_ring = vsi->rx_rings[i];
+ rx_ring->dcb_tc = 0;
+ }
+ return;
+ }
+
+ ice_for_each_traffic_class(n) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(n)))
+ break;
+
+ qoffset = vsi->tc_cfg.tc_info[n].qoffset;
+ qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ tx_ring = vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring->dcb_tc = n;
+ rx_ring->dcb_tc = n;
+ }
+ }
+}
+
+/**
+ * ice_pf_dcb_recfg - Reconfigure all VSIs
+ * @pf: pointer to the PF struct
+ *
+ * The caller is assumed to have disabled all VSIs before
+ * calling this function. DCB is reconfigured based on
+ * local_dcbx_cfg.
+ */
+static void ice_pf_dcb_recfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ u8 tc_map = 0;
+ int v, ret;
+
+ /* Update each VSI */
+ ice_for_each_vsi(pf, v) {
+ if (!pf->vsi[v])
+ continue;
+
+ if (pf->vsi[v]->type == ICE_VSI_PF)
+ tc_map = ice_dcb_get_ena_tc(dcbcfg);
+ else
+ tc_map = ICE_DFLT_TRAFFIC_CLASS;
+
+ ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to config TC for VSI index: %d\n",
+ pf->vsi[v]->idx);
+ else
+ ice_vsi_map_rings_to_vectors(pf->vsi[v]);
+ }
+}
+
+/**
+ * ice_pf_dcb_cfg - Apply new DCB configuration
+ * @pf: pointer to the PF struct
+ * @new_cfg: DCBX config to apply
+ */
+static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
+{
+ struct ice_dcbx_cfg *old_cfg, *curr_cfg;
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ int ret = 0;
+
+ curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ /* Enable DCB tagging only when more than one TC */
+ if (ice_dcb_get_num_tc(new_cfg) > 1) {
+ dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ }
+
+ if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
+ return ret;
+ }
+
+ /* Store old config in case FW config fails */
+ old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
+ memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
+
+ /* avoid race conditions by holding the lock while disabling and
+ * re-enabling the VSI
+ */
+ rtnl_lock();
+ ice_pf_dis_all_vsi(pf, true);
+
+ memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
+ memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
+
+ /* Only send new config to HW if we are in SW LLDP mode. Otherwise,
+ * the new config came from the HW in the first place.
+ */
+ if (pf->hw.port_info->is_sw_lldp) {
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
+ /* Restore previous settings to local config */
+ memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
+ goto out;
+ }
+ }
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto out;
+ }
+
+ ice_pf_dcb_recfg(pf);
+
+out:
+ ice_pf_ena_all_vsi(pf, true);
+ rtnl_unlock();
+ devm_kfree(&pf->pdev->dev, old_cfg);
+ return ret;
+}
+
+/**
+ * ice_dcb_rebuild - rebuild DCB post reset
+ * @pf: physical function instance
+ */
+void ice_dcb_rebuild(struct ice_pf *pf)
+{
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_dcbx_cfg *prev_cfg;
+ enum ice_status ret;
+ u8 willing;
+
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto dcb_error;
+ }
+
+ /* If DCB was not enabled previously, we are done */
+ if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ return;
+
+ /* Save current willing state and force FW to unwilling */
+ willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
+ pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
+ goto dcb_error;
+ }
+
+ /* Retrieve DCB config and ensure same as current in SW */
+ prev_cfg = devm_kmemdup(&pf->pdev->dev,
+ &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(*prev_cfg), GFP_KERNEL);
+ if (!prev_cfg) {
+ dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
+ goto dcb_error;
+ }
+
+ ice_init_dcb(&pf->hw);
+ if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
+ sizeof(*prev_cfg))) {
+ /* difference in cfg detected - disable DCB till next MIB */
+ dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+ goto dcb_error;
+ }
+
+ /* fetched config congruent to previous configuration */
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+
+ /* Configuration replayed - reset willing state to previous */
+ pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
+ ret = ice_set_dcb_cfg(pf->hw.port_info);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
+ goto dcb_error;
+ }
+ dev_info(&pf->pdev->dev, "DCB restored after reset\n");
+ ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
+ goto dcb_error;
+ }
+
+ return;
+
+dcb_error:
+ dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
+ prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
+ prev_cfg->etscfg.willing = true;
+ prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+ prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ ice_pf_dcb_cfg(pf, prev_cfg);
+ devm_kfree(&pf->pdev->dev, prev_cfg);
+}
+
+/**
+ * ice_dcb_init_cfg - set the initial DCB config in SW
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_init_cfg(struct ice_pf *pf)
+{
+ struct ice_dcbx_cfg *newcfg;
+ struct ice_port_info *pi;
+ int ret = 0;
+
+ pi = pf->hw.port_info;
+ newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
+ if (!newcfg)
+ return -ENOMEM;
+
+ memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
+ memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+
+ dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
+ if (ice_pf_dcb_cfg(pf, newcfg))
+ ret = -EINVAL;
+
+ devm_kfree(&pf->pdev->dev, newcfg);
+
+ return ret;
+}
+
+/**
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
+ * @pf: PF to apply config to
+ */
+static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
+{
+ struct ice_aqc_port_ets_elem buf = { 0 };
+ struct ice_dcbx_cfg *dcbcfg;
+ struct ice_port_info *pi;
+ struct ice_hw *hw;
+ int ret;
+
+ hw = &pf->hw;
+ pi = hw->port_info;
+ dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
+
+ memset(dcbcfg, 0, sizeof(*dcbcfg));
+ memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+
+ dcbcfg->etscfg.willing = 1;
+ dcbcfg->etscfg.maxtcs = 8;
+ dcbcfg->etscfg.tcbwtable[0] = 100;
+ dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+
+ memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
+ sizeof(dcbcfg->etsrec));
+ dcbcfg->etsrec.willing = 0;
+
+ dcbcfg->pfc.willing = 1;
+ dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;
+
+ dcbcfg->numapps = 1;
+ dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
+ dcbcfg->app[0].priority = 3;
+ dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;
+
+ ret = ice_pf_dcb_cfg(pf, dcbcfg);
+ devm_kfree(&pf->pdev->dev, dcbcfg);
+ if (ret)
+ return ret;
+
+ return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
+}
+
+/**
+ * ice_init_pf_dcb - initialize DCB for a PF
+ * @pf: PF to initialize DCB for
+ */
+int ice_init_pf_dcb(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ struct ice_port_info *port_info;
+ struct ice_hw *hw = &pf->hw;
+ int sw_default = 0;
+ int err;
+
+ port_info = hw->port_info;
+
+ /* check if device is DCB capable */
+ if (!hw->func_caps.common_cap.dcb) {
+ dev_dbg(dev, "DCB not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Best effort to put DCBx and LLDP into a good state */
+ port_info->dcbx_status = ice_get_dcbx_status(hw);
+ if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
+ port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
+ bool dcbx_status;
+
+ /* Attempt to start LLDP engine. Ignore errors
+ * as this will error if it is already started
+ */
+ ice_aq_start_lldp(hw, NULL);
+
+ /* Attempt to start DCBX. Ignore errors as this
+ * will error if it is already started
+ */
+ ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
+ }
+
+ err = ice_init_dcb(hw);
+ if (err) {
+ /* FW LLDP not in usable state, default to SW DCBx/LLDP */
+ dev_info(&pf->pdev->dev, "FW LLDP not in usable state\n");
+ hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
+ hw->port_info->is_sw_lldp = true;
+ }
+
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
+ dev_info(&pf->pdev->dev, "DCBX disabled\n");
+
+ /* LLDP disabled in FW */
+ if (port_info->is_sw_lldp) {
+ sw_default = 1;
+ dev_info(&pf->pdev->dev, "DCBx/LLDP in SW mode.\n");
+ }
+
+ if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ sw_default = 1;
+ dev_info(&pf->pdev->dev, "DCBX not started\n");
+ }
+
+ if (sw_default) {
+ err = ice_dcb_sw_dflt_cfg(pf);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to set local DCB config %d\n", err);
+ err = -EIO;
+ goto dcb_init_err;
+ }
+
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ set_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ return 0;
+ }
+
+ /* DCBX in FW and LLDP enabled in FW */
+ pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
+
+ set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+
+ err = ice_dcb_init_cfg(pf);
+ if (err)
+ goto dcb_init_err;
+
+ dev_info(&pf->pdev->dev, "DCBX offload supported\n");
+ return err;
+
+dcb_init_err:
+ dev_err(dev, "DCB init failed\n");
+ return err;
+}
+
+/**
+ * ice_update_dcb_stats - Update DCB stats counters
+ * @pf: PF whose stats need to be updated
+ */
+void ice_update_dcb_stats(struct ice_pf *pf)
+{
+ struct ice_hw_port_stats *prev_ps, *cur_ps;
+ struct ice_hw *hw = &pf->hw;
+ u8 pf_id = hw->pf_id;
+ int i;
+
+ prev_ps = &pf->stats_prev;
+ cur_ps = &pf->stats;
+
+ for (i = 0; i < 8; i++) {
+ ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xoff_rx[i],
+ &cur_ps->priority_xoff_rx[i]);
+ ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_rx[i],
+ &cur_ps->priority_xon_rx[i]);
+ ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_tx[i],
+ &cur_ps->priority_xon_tx[i]);
+ ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xoff_tx[i],
+ &cur_ps->priority_xoff_tx[i]);
+ ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
+ pf->stat_prev_loaded,
+ &prev_ps->priority_xon_2_xoff[i],
+ &cur_ps->priority_xon_2_xoff[i]);
+ }
+}
+
+/**
+ * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
+ * @tx_ring: ring to send buffer on
+ * @first: pointer to struct ice_tx_buf
+ */
+int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ struct ice_tx_buf *first)
+{
+ struct sk_buff *skb = first->skb;
+
+ if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
+ return 0;
+
+ /* Insert 802.1p priority into VLAN header */
+ if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
+ skb->priority != TC_PRIO_CONTROL) {
+ first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+ /* Mask the lower 3 bits to set the 802.1p priority */
+ first->tx_flags |= (skb->priority & 0x7) <<
+ ICE_TX_FLAGS_VLAN_PR_S;
+ if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ rc = skb_cow_head(skb, 0);
+ if (rc < 0)
+ return rc;
+ vhdr = (struct vlan_ethhdr *)skb->data;
+ vhdr->h_vlan_TCI = htons(first->tx_flags >>
+ ICE_TX_FLAGS_VLAN_S);
+ } else {
+ first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
+ }
+ }
+
+ return 0;
+}
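
The three priority bits masked above land in the PCP field (bits 15:13) of the 802.1Q TCI. The standalone sketch below shows that placement; it is not driver code and defines its own constants.

#include <assert.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT	13	/* PCP occupies TCI bits 15:13 */
#define VLAN_VID_MASK	0x0fff	/* VID occupies TCI bits 11:0 */

/* Build an 802.1Q TCI from an skb-style priority and a VLAN ID; the
 * priority is masked to 3 bits just as the driver does above.
 */
static uint16_t build_tci(uint32_t priority, uint16_t vid)
{
	return (uint16_t)(((priority & 0x7) << VLAN_PRIO_SHIFT) |
			  (vid & VLAN_VID_MASK));
}

int main(void)
{
	assert(build_tci(5, 100) == 0xa064);	/* PCP 5, VID 100 */
	return 0;
}
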
+
+/**
+ * ice_dcb_process_lldp_set_mib_change - Process MIB change
+ * @pf: ptr to ice_pf
+ * @event: pointer to the admin queue receive event
+ */
+void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ struct ice_rq_event_info *event)
+{
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
+ int err;
+
+ prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
+ sizeof(*dcbcfg), GFP_KERNEL);
+ if (!dcbcfg)
+ return;
+
+ err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
+ if (!err)
+ ice_pf_dcb_cfg(pf, dcbcfg);
+
+ devm_kfree(&pf->pdev->dev, dcbcfg);
+
+ /* Get updated DCBx data from firmware */
+ err = ice_get_dcb_cfg(pf->hw.port_info);
+ if (err)
+ dev_err(&pf->pdev->dev,
+ "Failed to get DCB config\n");
+ } else {
+ dev_dbg(&pf->pdev->dev,
+ "MIB Change Event in HOST mode\n");
+ }
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
new file mode 100644
index 000000000000..ca7b76faa03c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_DCB_LIB_H_
+#define _ICE_DCB_LIB_H_
+
+#include "ice.h"
+#include "ice_lib.h"
+
+#ifdef CONFIG_DCB
+#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
+
+void ice_dcb_rebuild(struct ice_pf *pf);
+u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
+void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
+int ice_init_pf_dcb(struct ice_pf *pf);
+void ice_update_dcb_stats(struct ice_pf *pf);
+int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ struct ice_tx_buf *first);
+void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+static inline void
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+{
+ tlan_ctx->cgd_num = ring->dcb_tc;
+}
+#else
+#define ice_dcb_rebuild(pf) do {} while (0)
+
+static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
+{
+ return ICE_DFLT_TRAFFIC_CLASS;
+}
+
+static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
+{
+ return 1;
+}
+
+static inline int ice_init_pf_dcb(struct ice_pf *pf)
+{
+ dev_dbg(&pf->pdev->dev, "DCB not supported\n");
+ return -EOPNOTSUPP;
+}
+
+static inline int
+ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ struct ice_tx_buf __always_unused *first)
+{
+ return 0;
+}
+
+#define ice_update_dcb_stats(pf) do {} while (0)
+#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
+#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
+#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
+#endif /* CONFIG_DCB */
+#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index eb8d149e317c..1341fde8d53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -4,6 +4,8 @@
/* ethtool support for ice */
#include "ice.h"
+#include "ice_lib.h"
+#include "ice_dcb_lib.h"
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -33,8 +35,14 @@ static int ice_q_stats_len(struct net_device *netdev)
#define ICE_PF_STATS_LEN ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats)
-#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
- ice_q_stats_len(n))
+#define ICE_PFC_STATS_LEN ( \
+ (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \
+ FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \
+ / sizeof(u64))
+#define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \
+ ICE_VSI_STATS_LEN + ice_q_stats_len(n))
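
As a sanity check of the new macro: with the usual eight user priorities (the xon/xoff loops added below iterate 0..7), the four counter arrays contribute 32 extra u64 stats, matching the 32 new strings added in ice_get_strings(). A minimal sketch, assuming u64[8] arrays:

#include <assert.h>
#include <stdint.h>

/* Hypothetical stand-in for the driver's per-priority counter arrays,
 * assuming eight user priorities and u64 counters.
 */
struct pfc_stats_sketch {
	uint64_t priority_xoff_rx[8];
	uint64_t priority_xon_rx[8];
	uint64_t priority_xoff_tx[8];
	uint64_t priority_xon_tx[8];
};

int main(void)
{
	/* Mirrors ICE_PFC_STATS_LEN: total size of the four arrays divided
	 * by sizeof(u64) gives the number of exported PFC stats.
	 */
	assert(sizeof(struct pfc_stats_sketch) / sizeof(uint64_t) == 32);
	return 0;
}
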
static const struct ice_stats ice_gstrings_vsi_stats[] = {
ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
@@ -126,6 +134,7 @@ struct ice_priv_flag {
static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_PRIV_FLAG("link-down-on-close", ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA),
+ ICE_PRIV_FLAG("disable-fw-lldp", ICE_FLAG_DISABLE_FW_LLDP),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -309,6 +318,22 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx-priority-%u-xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx-priority-%u-xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx-priority-%u-xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx-priority-%u-xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -382,13 +407,19 @@ static u32 ice_get_priv_flags(struct net_device *netdev)
static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ DECLARE_BITMAP(change_flags, ICE_PF_FLAGS_NBITS);
+ DECLARE_BITMAP(orig_flags, ICE_PF_FLAGS_NBITS);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ int ret = 0;
u32 i;
if (flags > BIT(ICE_PRIV_FLAG_ARRAY_SIZE))
return -EINVAL;
+ set_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
+
+ bitmap_copy(orig_flags, pf->flags, ICE_PF_FLAGS_NBITS);
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
const struct ice_priv_flag *priv_flag;
@@ -400,7 +431,79 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
clear_bit(priv_flag->bitno, pf->flags);
}
- return 0;
+ bitmap_xor(change_flags, pf->flags, orig_flags, ICE_PF_FLAGS_NBITS);
+
+ if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, change_flags)) {
+ if (test_bit(ICE_FLAG_DISABLE_FW_LLDP, pf->flags)) {
+ enum ice_status status;
+
+ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
+ NULL);
+ /* If unregistering for LLDP events fails, this is
+ * not an error state, as there shouldn't be any
+ * events to respond to.
+ */
+ if (status)
+ dev_info(&pf->pdev->dev,
+ "Failed to unreg for LLDP events\n");
+
+ /* The AQ call to stop the FW LLDP agent will generate
+ * an error if the agent is already stopped.
+ */
+ status = ice_aq_stop_lldp(&pf->hw, true, NULL);
+ if (status)
+ dev_warn(&pf->pdev->dev,
+					"Failed to stop LLDP agent\n");
+ /* Use case for having the FW LLDP agent stopped
+ * will likely not need DCB, so failure to init is
+ * not a concern of ethtool
+ */
+ status = ice_init_pf_dcb(pf);
+ if (status)
+				dev_warn(&pf->pdev->dev, "Failed to init DCB\n");
+ } else {
+ enum ice_status status;
+ bool dcbx_agent_status;
+
+ /* AQ command to start FW LLDP agent will return an
+ * error if the agent is already started
+ */
+ status = ice_aq_start_lldp(&pf->hw, NULL);
+ if (status)
+ dev_warn(&pf->pdev->dev,
+					"Failed to start LLDP agent\n");
+
+ /* AQ command to start FW DCBx agent will fail if
+ * the agent is already started
+ */
+ status = ice_aq_start_stop_dcbx(&pf->hw, true,
+ &dcbx_agent_status,
+ NULL);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Failed to start FW DCBX\n");
+
+ dev_info(&pf->pdev->dev, "FW DCBX agent is %s\n",
+ dcbx_agent_status ? "ACTIVE" : "DISABLED");
+
+ /* Failure to configure MIB change or init DCB is not
+ * relevant to ethtool. Print notification that
+ * registration/init failed but do not return error
+ * state to ethtool
+ */
+ status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
+ NULL);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+					"Failed to register for MIB change\n");
+
+ status = ice_init_pf_dcb(pf);
+ if (status)
+				dev_dbg(&pf->pdev->dev, "Failed to init DCB\n");
+ }
+ }
+ clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
+ return ret;
}
static int ice_get_sset_count(struct net_device *netdev, int sset)
@@ -486,6 +589,16 @@ ice_get_ethtool_stats(struct net_device *netdev,
data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+
+ for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+
+ for (j = 0; j < ICE_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
}
/**
@@ -811,7 +924,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
link_info = &vsi->port_info->phy.link_info;
- /* Initialize supported and advertised settings based on phy settings */
+ /* Initialize supported and advertised settings based on PHY settings */
switch (link_info->phy_type_low) {
case ICE_PHY_TYPE_LOW_100BASE_TX:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -921,6 +1034,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
25000baseCR_Full);
break;
case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
ethtool_link_ksettings_add_link_mode(ks, supported,
25000baseCR_Full);
break;
@@ -1137,10 +1251,10 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
*/
static void
ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
- struct net_device __always_unused *netdev)
+ struct net_device *netdev)
{
/* link is down and the driver needs to fall back on
- * supported phy types to figure out what info to display
+ * supported PHY types to figure out what info to display
*/
ice_phy_type_to_ethtool(netdev, ks);
@@ -1156,8 +1270,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
*
* Reports speed/duplex settings based on media_type
*/
-static int ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+static int
+ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
@@ -1349,7 +1464,7 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks,
} else {
/* If autoneg is currently enabled */
if (p->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) {
- /* If autoneg is supported 10GBASE_T is the only phy
+ /* If autoneg is supported 10GBASE_T is the only PHY
* that can disable it, so otherwise return error
*/
if (ethtool_link_ksettings_test_link_mode(ks,
@@ -1399,14 +1514,13 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!p)
return -EOPNOTSUPP;
- /* Check if this is lan vsi */
- for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) {
+ /* Check if this is LAN VSI */
+ ice_for_each_vsi(pf, idx)
if (pf->vsi[idx]->type == ICE_VSI_PF) {
if (np->vsi != pf->vsi[idx])
return -EOPNOTSUPP;
break;
}
- }
if (p->phy.media_type != ICE_MEDIA_BASET &&
p->phy.media_type != ICE_MEDIA_FIBER &&
@@ -1464,7 +1578,7 @@ ice_set_link_ksettings(struct net_device *netdev,
if (!abilities)
return -ENOMEM;
- /* Get the current phy config */
+ /* Get the current PHY config */
status = ice_aq_get_phy_caps(p, false, ICE_AQC_REPORT_SW_CFG, abilities,
NULL);
if (status) {
@@ -1559,15 +1673,16 @@ done:
}
/**
- * ice_get_rxnfc - command to get RX flow classification rules
+ * ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
*
* Returns Success if the command is supported.
*/
-static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static int
+ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -1821,18 +1936,21 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_port_info *pi = np->vsi->port_info;
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_vsi *vsi = np->vsi;
+ struct ice_dcbx_cfg *dcbx_cfg;
enum ice_status status;
/* Initialize pause params */
pause->rx_pause = 0;
pause->tx_pause = 0;
+ dcbx_cfg = &pi->local_dcbx_cfg;
+
pcaps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
return;
- /* Get current phy config */
+ /* Get current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (status)
@@ -1841,6 +1959,10 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->autoneg = ((pcaps->caps & ICE_AQC_PHY_AN_MODE) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
+ if (dcbx_cfg->pfc.pfcena)
+ /* PFC enabled so report LFC as off */
+ goto out;
+
if (pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
pause->tx_pause = 1;
if (pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
@@ -1861,6 +1983,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_pf *pf = np->vsi->back;
+ struct ice_dcbx_cfg *dcbx_cfg;
struct ice_vsi *vsi = np->vsi;
struct ice_hw *hw = &pf->hw;
struct ice_port_info *pi;
@@ -1871,6 +1994,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
+ dcbx_cfg = &pi->local_dcbx_cfg;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
@@ -1893,6 +2017,10 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
+ if (dcbx_cfg->pfc.pfcena) {
+ netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
if (pause->rx_pause && pause->tx_pause)
pi->fc.req_mode = ICE_FC_FULL;
else if (pause->rx_pause && !pause->tx_pause)
@@ -2021,11 +2149,12 @@ out:
* @key: hash key
* @hfunc: hash function
*
- * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
* returns 0 after programming the table.
*/
-static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int
+ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2087,7 +2216,7 @@ enum ice_container_type {
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, RX or TX
+ * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -2100,12 +2229,18 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
- struct ice_pf *pf = rc->ring->vsi->back;
+ struct ice_pf *pf;
+
+ if (!rc->ring)
+ return -EINVAL;
+
+ pf = rc->ring->vsi->back;
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
ec->rx_coalesce_usecs = rc->itr_setting & ~ICE_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc->itr_setting);
@@ -2120,49 +2255,60 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
}
/**
+ * ice_get_q_coalesce - get a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated to the queue for getting ITR/INTRL (coalesce) settings
+ * @ec: ethtool structure to fill with the queue's coalesce settings
+ * @q_num: queue number/index to get ITR/INTRL (coalesce) settings for
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Getting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx))
+ return -EINVAL;
+ if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx))
+ return -EINVAL;
+ } else if (q_num < vsi->num_rxq) {
+ if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ &vsi->rx_rings[q_num]->q_vector->rx))
+ return -EINVAL;
+ } else if (q_num < vsi->num_txq) {
+ if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ &vsi->tx_rings[q_num]->q_vector->tx))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* __ice_get_coalesce - get ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
* @q_num: queue number to get the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we return coalesce settings
+ * based on queue number 0, else use the actual q_num passed in.
*/
static int
__ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- int tx = -EINVAL, rx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
- if (q_num < 0) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[0]->q_vector->rx);
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[0]->q_vector->tx);
-
- goto update_coalesced_frames;
- }
-
- if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[q_num]->q_vector->rx);
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[q_num]->q_vector->tx);
- } else if (q_num < vsi->num_rxq) {
- rx = ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
- &vsi->rx_rings[q_num]->q_vector->rx);
- } else if (q_num < vsi->num_txq) {
- tx = ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
- &vsi->tx_rings[q_num]->q_vector->tx);
- } else {
- /* q_num is invalid for both Rx and Tx queues */
- return -EINVAL;
- }
+ if (q_num < 0)
+ q_num = 0;
-update_coalesced_frames:
- /* either q_num is invalid for both Rx and Tx queues or setting coalesce
- * failed completely
- */
- if (tx && rx)
+ if (ice_get_q_coalesce(vsi, ec, q_num))
return -EINVAL;
if (q_num < vsi->num_txq)
@@ -2180,15 +2326,16 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_get_coalesce(netdev, ec, -1);
}
-static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, q_num);
}
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, RX or TX
+ * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -2213,6 +2360,23 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
switch (c_type) {
case ICE_RX_CONTAINER:
+ if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
+ (ec->rx_coalesce_usecs_high &&
+ ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
+ netdev_info(vsi->netdev,
+ "Invalid value, rx-usecs-high valid values are 0 (disabled), %d-%d\n",
+ pf->hw.intrl_gran, ICE_MAX_INTRL);
+ return -EINVAL;
+ }
+
+ if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
+ rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
+ wr32(&pf->hw, GLINT_RATE(vsi->hw_base_vector +
+ rc->ring->q_vector->v_idx),
+ ice_intrl_usec_to_reg(ec->rx_coalesce_usecs_high,
+ pf->hw.intrl_gran));
+ }
+
if (ec->rx_coalesce_usecs != itr_setting &&
ec->use_adaptive_rx_coalesce) {
netdev_info(vsi->netdev,
@@ -2235,6 +2399,12 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
}
break;
case ICE_TX_CONTAINER:
+ if (ec->tx_coalesce_usecs_high) {
+ netdev_info(vsi->netdev,
+ "setting tx-usecs-high is not supported\n");
+ return -EINVAL;
+ }
+
if (ec->tx_coalesce_usecs != itr_setting &&
ec->use_adaptive_tx_coalesce) {
netdev_info(vsi->netdev,
@@ -2264,54 +2434,77 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
return 0;
}
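
The rx-usecs-high check added to ice_set_rc_coalesce() above accepts 0 (rate limiting disabled) or any value between the hardware granularity and the driver maximum. A small standalone sketch of that rule, using placeholder numbers rather than the real intrl_gran/ICE_MAX_INTRL values:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Validity rule for rx-usecs-high as enforced above: zero disables the
 * interrupt rate limit; otherwise the value must fall within
 * [granularity, maximum]. The numbers in main() are placeholders.
 */
static bool rx_usecs_high_valid(uint32_t usecs, uint32_t gran, uint32_t max)
{
	return usecs == 0 || (usecs >= gran && usecs <= max);
}

int main(void)
{
	assert(rx_usecs_high_valid(0, 2, 250));		/* disabled */
	assert(rx_usecs_high_valid(100, 2, 250));
	assert(!rx_usecs_high_valid(1, 2, 250));	/* below granularity */
	return 0;
}
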
+/**
+ * ice_set_q_coalesce - set a queue's ITR/INTRL (coalesce) settings
+ * @vsi: VSI associated to the queue that needs updating
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ *
+ * Return 0 on success, and negative under the following conditions:
+ * 1. Setting Tx or Rx ITR/INTRL (coalesce) settings failed.
+ * 2. The q_num passed in is not a valid number/index for Tx and Rx rings.
+ */
+static int
+ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
+{
+ if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi))
+ return -EINVAL;
+
+ if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi))
+ return -EINVAL;
+ } else if (q_num < vsi->num_rxq) {
+ if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ &vsi->rx_rings[q_num]->q_vector->rx,
+ vsi))
+ return -EINVAL;
+ } else if (q_num < vsi->num_txq) {
+ if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ &vsi->tx_rings[q_num]->q_vector->tx,
+ vsi))
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * __ice_set_coalesce - set ITR/INTRL values for the device
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: ethtool structure containing the user's requested coalesce settings
+ * @q_num: queue number to set the coalesce settings for
+ *
+ * If the caller passes in a negative q_num then we set the coalesce settings
+ * for all Tx/Rx queues, else use the actual q_num passed in.
+ */
static int
__ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
int q_num)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- int rx = -EINVAL, tx = -EINVAL;
struct ice_vsi *vsi = np->vsi;
if (q_num < 0) {
int i;
ice_for_each_q_vector(vsi, i) {
- struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &q_vector->rx, vsi) ||
- ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &q_vector->tx, vsi))
+ if (ice_set_q_coalesce(vsi, ec, i))
return -EINVAL;
}
-
goto set_work_lmt;
}
- if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &vsi->rx_rings[q_num]->q_vector->rx,
- vsi);
- tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &vsi->tx_rings[q_num]->q_vector->tx,
- vsi);
- } else if (q_num < vsi->num_rxq) {
- rx = ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
- &vsi->rx_rings[q_num]->q_vector->rx,
- vsi);
- } else if (q_num < vsi->num_txq) {
- tx = ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
- &vsi->tx_rings[q_num]->q_vector->tx,
- vsi);
- }
-
- /* either q_num is invalid for both Rx and Tx queues or setting coalesce
- * failed completely
- */
- if (rx && tx)
+ if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
set_work_lmt:
+
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_lmt = max(ec->tx_max_coalesced_frames_irq,
ec->rx_max_coalesced_frames_irq);
@@ -2325,8 +2518,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_set_coalesce(netdev, ec, -1);
}
-static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6bf5cc064270..ec25f26069b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -49,6 +49,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PRTDCB_GENS 0x00083020
+#define PRTDCB_GENS_DCBX_STATUS_S 0
+#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
@@ -106,6 +109,16 @@
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_CTL 0x0016CC54
+#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
+#define GLINT_CTL_ITR_GRAN_200_S 16
+#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16)
+#define GLINT_CTL_ITR_GRAN_100_S 20
+#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20)
+#define GLINT_CTL_ITR_GRAN_50_S 24
+#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
+#define GLINT_CTL_ITR_GRAN_25_S 28
+#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
@@ -150,11 +163,15 @@
#define PFINT_OICR_ENA 0x0016C900
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
+#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_RQCTL_ITR_INDX_S 11
+#define QINT_RQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_RQCTL_CAUSE_ENA_M BIT(30)
#define QINT_TQCTL(_DBQM) (0x00140000 + ((_DBQM) * 4))
#define QINT_TQCTL_MSIX_INDX_S 0
+#define QINT_TQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define QINT_TQCTL_ITR_INDX_S 11
+#define QINT_TQCTL_ITR_INDX_M ICE_M(0x3, 11)
#define QINT_TQCTL_CAUSE_ENA_M BIT(30)
#define VPINT_ALLOC(_VF) (0x001D1000 + ((_VF) * 4))
#define VPINT_ALLOC_FIRST_S 0
@@ -168,6 +185,8 @@
#define VPINT_ALLOC_PCI_LAST_S 12
#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
+#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -306,11 +325,16 @@
#define GLPRT_PTC64L(_i) (0x00380B80 + ((_i) * 8))
#define GLPRT_PTC9522H(_i) (0x00380D04 + ((_i) * 8))
#define GLPRT_PTC9522L(_i) (0x00380D00 + ((_i) * 8))
+#define GLPRT_PXOFFRXC(_i, _j) (0x00380500 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXOFFTXC(_i, _j) (0x00380F40 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXONRXC(_i, _j) (0x00380300 + ((_i) * 8 + (_j) * 64))
+#define GLPRT_PXONTXC(_i, _j) (0x00380D40 + ((_i) * 8 + (_j) * 64))
#define GLPRT_RFC(_i) (0x00380AC0 + ((_i) * 8))
#define GLPRT_RJC(_i) (0x00380B00 + ((_i) * 8))
#define GLPRT_RLEC(_i) (0x00380140 + ((_i) * 8))
#define GLPRT_ROC(_i) (0x00380240 + ((_i) * 8))
#define GLPRT_RUC(_i) (0x00380200 + ((_i) * 8))
+#define GLPRT_RXON2OFFCNT(_i, _j) (0x00380700 + ((_i) * 8 + (_j) * 64))
#define GLPRT_TDOLD(_i) (0x00381280 + ((_i) * 8))
#define GLPRT_UPRCH(_i) (0x00381304 + ((_i) * 8))
#define GLPRT_UPRCL(_i) (0x00381300 + ((_i) * 8))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index ef4c79b5aa32..510a8c900e61 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -20,7 +20,7 @@ union ice_32byte_rx_desc {
} lo_dword;
union {
__le32 rss; /* RSS Hash */
- __le32 fd_id; /* Flow Director filter id */
+ __le32 fd_id; /* Flow Director filter ID */
} hi_dword;
} qword0;
struct {
@@ -99,7 +99,7 @@ enum ice_rx_ptype_payload_layer {
ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
};
-/* RX Flex Descriptor
+/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
*/
@@ -113,7 +113,7 @@ union ice_32b_rx_flex_desc {
} read;
struct {
/* Qword 0 */
- u8 rxdid; /* descriptor builder profile id */
+ u8 rxdid; /* descriptor builder profile ID */
u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
__le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
__le16 pkt_len; /* [15:14] are reserved */
@@ -149,7 +149,7 @@ union ice_32b_rx_flex_desc {
/* Rx Flex Descriptor NIC Profile
* This descriptor corresponds to RxDID 2 which contains
- * metadata fields for RSS, flow id and timestamp info
+ * metadata fields for RSS, flow ID and timestamp info
*/
struct ice_32b_rx_flex_desc_nic {
/* Qword 0 */
@@ -208,23 +208,23 @@ enum ice_flex_rx_mdid {
ICE_RX_MDID_HASH_HIGH,
};
-/* Rx Flag64 packet flag bits */
-enum ice_rx_flg64_bits {
- ICE_RXFLG_PKT_DSI = 0,
- ICE_RXFLG_EVLAN_x8100 = 15,
- ICE_RXFLG_EVLAN_x9100,
- ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_MAC = 22,
- ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_FIN = 32,
- ICE_RXFLG_SYN,
- ICE_RXFLG_RST,
- ICE_RXFLG_TNL0 = 38,
- ICE_RXFLG_TNL1,
- ICE_RXFLG_TNL2,
- ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_RSVD = 63
+/* Rx/Tx Flag64 packet flag bits */
+enum ice_flg64_bits {
+ ICE_FLG_PKT_DSI = 0,
+ ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x9100,
+ ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_MAC = 22,
+ ICE_FLG_TNL_VLAN,
+ ICE_FLG_PKT_FRG,
+ ICE_FLG_FIN = 32,
+ ICE_FLG_SYN,
+ ICE_FLG_RST,
+ ICE_FLG_TNL0 = 38,
+ ICE_FLG_TNL1,
+ ICE_FLG_TNL2,
+ ICE_FLG_UDP_GRE,
+ ICE_FLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
@@ -322,7 +322,7 @@ enum ice_rlan_ctx_rx_hsplit_1 {
ICE_RLAN_RX_HSPLIT_1_SPLIT_ALWAYS = 2,
};
-/* TX Descriptor */
+/* Tx Descriptor */
struct ice_tx_desc {
__le64 buf_addr; /* Address of descriptor's data buf */
__le64 cmd_type_offset_bsz;
@@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
- ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
+ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
+ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
+ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
+ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
};
#define ICE_TXD_QW1_OFFSET_S 16
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fa61203bee26..fbf1eba0cc2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_dcb_lib.h"
/**
* ice_setup_rx_ctx - Configure a receive ring context
@@ -73,7 +74,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
QRXFLXP_CNTXT_RXDID_IDX_M;
- /* increasing context priority to pick up profile id;
+ /* increasing context priority to pick up profile ID;
* default is 0x01; setting to 0x03 to ensure profile
* is programming if prev context is of same priority
*/
@@ -124,6 +125,8 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
+ ice_set_cgd_num(tlan_ctx, ring);
+
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -138,7 +141,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
- /* Firmware expects vmvf_num to be absolute VF id */
+ /* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
@@ -175,17 +178,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
int i;
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
-
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- break;
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_MAX_RETRY)
- return -ETIMEDOUT;
- return 0;
+ return -ETIMEDOUT;
}
/**
@@ -197,19 +197,13 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- int i, j, ret = 0;
+ int i, ret = 0;
for (i = 0; i < vsi->num_rxq; i++) {
int pf_q = vsi->rxq_map[i];
u32 rx_reg;
- for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
- rx_reg = rd32(hw, QRX_CTRL(pf_q));
- if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
- ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
- break;
- usleep_range(1000, 2000);
- }
+ rx_reg = rd32(hw, QRX_CTRL(pf_q));
/* Skip if the queue is already in the requested state */
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
@@ -238,12 +232,11 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
/**
* ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
* @vsi: VSI pointer
- * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
* On success: returns 0
*/
-static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
+static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
@@ -258,15 +251,11 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
if (!vsi->rx_rings)
goto err_rxrings;
- if (alloc_qvectors) {
- /* allocate memory for q_vector pointers */
- vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
- vsi->num_q_vectors,
- sizeof(*vsi->q_vectors),
- GFP_KERNEL);
- if (!vsi->q_vectors)
- goto err_vectors;
- }
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
+ sizeof(*vsi->q_vectors), GFP_KERNEL);
+ if (!vsi->q_vectors)
+ goto err_vectors;
return 0;
@@ -279,25 +268,49 @@ err_txrings:
}
/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
* @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
+{
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ break;
+ default:
+ dev_dbg(&vsi->back->pdev->dev,
+ "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ * @vf_id: ID of the VF being configured
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = NULL;
+
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
- vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
- vsi->alloc_txq = pf->num_vf_qps;
- vsi->alloc_rxq = pf->num_vf_qps;
+ vf = &pf->vf[vsi->vf_id];
+ vsi->alloc_txq = vf->num_vf_qs;
+ vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 from the original vector
@@ -306,10 +319,11 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->num_q_vectors = pf->num_vf_msix - 1;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
+
+ ice_vsi_set_num_desc(vsi);
}
/**
@@ -370,16 +384,15 @@ void ice_vsi_delete(struct ice_vsi *vsi)
}
/**
- * ice_vsi_free_arrays - clean up VSI resources
+ * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
* @vsi: pointer to VSI being cleared
- * @free_qvectors: bool to specify if q_vectors should be deallocated
*/
-static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
/* free the ring and vector containers */
- if (free_qvectors && vsi->q_vectors) {
+ if (vsi->q_vectors) {
devm_kfree(&pf->pdev->dev, vsi->q_vectors);
vsi->q_vectors = NULL;
}
@@ -427,7 +440,7 @@ int ice_vsi_clear(struct ice_vsi *vsi)
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
- ice_vsi_free_arrays(vsi, true);
+ ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(&pf->pdev->dev, vsi);
@@ -455,10 +468,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
+ * @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+static struct ice_vsi *
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
struct ice_vsi *vsi = NULL;
@@ -484,18 +499,21 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;
- ice_vsi_set_num_qs(vsi);
+ if (type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi, true))
+ if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi, true))
+ if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
default:
@@ -547,7 +565,7 @@ static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
/**
* __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
*/
@@ -579,11 +597,10 @@ err_scatter:
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
- * This is an internal function for assigning queues from the PF to VSI and
- * initially tries to find contiguous space. If it is not successful to find
- * contiguous space, then it tries with the scatter approach.
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
*
* Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
*/
@@ -827,7 +844,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* find the (rounded up) power-of-2 of qcount */
pow = order_base_2(qcount_rx);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
@@ -852,7 +869,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
- vsi->num_rxq = offset;
+
+	/* if offset is non-zero, it was calculated correctly based on the
+	 * enabled TCs for this VSI; otherwise fall back to qcount_rx, which
+	 * is always correct and non-zero because it is derived from the
+	 * VSI's allocated Rx queues (at least 1, so qcount_rx is at least 1)
+	 */
+ if (offset)
+ vsi->num_rxq = offset;
+ else
+ vsi->num_rxq = qcount_rx;
+
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
@@ -881,6 +909,9 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
u8 lut_type, hash_type;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
switch (vsi->type) {
case ICE_VSI_PF:
@@ -894,8 +925,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
return;
}
@@ -923,6 +953,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
if (!ctxt)
return -ENOMEM;
+ ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -948,6 +979,14 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
+ /* Enable MAC Antispoof with new VSI being initialized or updated */
+ if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -973,10 +1012,11 @@ static int ice_vsi_init(struct ice_vsi *vsi)
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
+ struct ice_pf *pf = vsi->back;
struct ice_ring *ring;
if (!vsi->q_vectors[v_idx]) {
- dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+ dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
v_idx);
return;
}
@@ -991,7 +1031,7 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
if (vsi->netdev)
netif_napi_del(&q_vector->napi);
- devm_kfree(&vsi->back->pdev->dev, q_vector);
+ devm_kfree(&pf->pdev->dev, q_vector);
vsi->q_vectors[v_idx] = NULL;
}
@@ -1003,7 +1043,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
int v_idx;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
ice_free_q_vector(vsi, v_idx);
}
@@ -1143,8 +1183,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
num_q_vectors, vsi->idx);
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
- vsi->type);
+ dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
break;
}
@@ -1153,7 +1192,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
"Failed to get tracking for %d HW vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->hw_base_vector);
if (vsi->type != ICE_VSI_VF) {
- ice_free_res(vsi->back->sw_irq_tracker,
+ ice_free_res(pf->sw_irq_tracker,
vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += num_q_vectors;
}
@@ -1215,7 +1254,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1234,7 +1273,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1253,7 +1292,11 @@ err_out:
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
* and Rx rings to the vector as "efficiently" as possible.
*/
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
+#endif /* CONFIG_DCB */
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
@@ -1339,7 +1382,6 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
*/
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
- u8 seed[ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE];
struct ice_aqc_get_set_rss_keys *key;
struct ice_pf *pf = vsi->back;
enum ice_status status;
@@ -1361,31 +1403,30 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
vsi->rss_table_size);
if (status) {
- dev_err(&vsi->back->pdev->dev,
+ dev_err(&pf->pdev->dev,
"set_rss_lut failed, error %d\n", status);
err = -EIO;
goto ice_vsi_cfg_rss_exit;
}
- key = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*key), GFP_KERNEL);
+ key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto ice_vsi_cfg_rss_exit;
}
if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ memcpy(key,
+ (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
+ ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
else
- netdev_rss_key_fill((void *)seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
- memcpy(&key->standard_rss_key, seed,
- ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE);
+ netdev_rss_key_fill((void *)key,
+ ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
if (status) {
- dev_err(&vsi->back->pdev->dev, "set_rss_key failed, error %d\n",
+ dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
status);
err = -EIO;
}
@@ -1397,12 +1438,12 @@ ice_vsi_cfg_rss_exit:
}
/**
- * ice_add_mac_to_list - Add a mac address filter entry to the list
+ * ice_add_mac_to_list - Add a MAC address filter entry to the list
* @vsi: the VSI to be forwarded to
* @add_list: pointer to the list which contains MAC filter entries
* @macaddr: the MAC address to be added.
*
- * Adds mac address filter entry to the temp list
+ * Adds MAC address filter entry to the temp list
*
* Returns 0 on success or ENOMEM on failure.
*/
@@ -1504,7 +1545,7 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h)
/**
* ice_vsi_add_vlan - Add VSI membership for given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be added
+ * @vid: VLAN ID to be added
*/
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
@@ -1542,7 +1583,7 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
/**
* ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
* @vsi: the VSI being configured
- * @vid: VLAN id to be removed
+ * @vid: VLAN ID to be removed
*
* Returns 0 on success and negative on failure
*/
@@ -1551,7 +1592,8 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
- int status = 0;
+ enum ice_status status;
+ int err = 0;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
if (!list)
@@ -1567,14 +1609,20 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
- if (ice_remove_vlan(&pf->hw, &tmp_add_list)) {
- dev_err(&pf->pdev->dev, "Error removing VLAN %d on vsi %i\n",
- vid, vsi->vsi_num);
- status = -EIO;
+ status = ice_remove_vlan(&pf->hw, &tmp_add_list);
+ if (status == ICE_ERR_DOES_NOT_EXIST) {
+ dev_dbg(&pf->pdev->dev,
+ "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+ vid, vsi->vsi_num, status);
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+			"Error removing VLAN %d on VSI %i, error: %d\n",
+ vid, vsi->vsi_num, status);
+ err = -EIO;
}
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
- return status;
+ return err;
}
/**
@@ -1586,7 +1634,6 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
*/
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
- int err = 0;
u16 i;
if (vsi->type == ICE_VSI_VF)
@@ -1601,14 +1648,19 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
vsi->rx_buf_len = ICE_RXBUF_2048;
setup_rings:
/* set up individual rings */
- for (i = 0; i < vsi->num_rxq && !err; i++)
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ for (i = 0; i < vsi->num_rxq; i++) {
+ int err;
- if (err) {
- dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
- return -EIO;
+ err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+ if (err) {
+ dev_err(&vsi->back->pdev->dev,
+ "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ i, err);
+ return err;
+ }
}
- return err;
+
+ return 0;
}
/**
@@ -1640,7 +1692,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
num_q_grps = 1;
/* set up and configure the Tx queues for each enabled TC */
- for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ ice_for_each_traffic_class(tc) {
if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
break;
@@ -1660,10 +1712,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
rings[q_idx]->tail =
pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len,
- NULL);
+ i, num_q_grps, qg_buf,
+ buf_len, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev,
+ dev_err(&pf->pdev->dev,
"Failed to set LAN Tx queue context, error: %d\n",
status);
err = -ENODEV;
@@ -1707,7 +1759,7 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
* This function converts a decimal interrupt rate limit in usecs to the format
* expected by firmware.
*/
-static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
u32 val = intrl / gran;
@@ -1717,17 +1769,49 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
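
As a worked example of the programming above: with ICE_ITR_GRAN_US equal to 2 (the 2 us granularity named in the function comment) and the GLINT_CTL field shifts of 16, 20, 24 and 28 added in ice_hw_autogen.h in this patch, the register value written is 0x22220000. A quick standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t gran = 2;	/* ICE_ITR_GRAN_US */
	uint32_t regval = (gran << 16) | (gran << 20) |
			  (gran << 24) | (gran << 28);

	assert(regval == 0x22220000);
	return 0;
}
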
+
+/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
* @q_vector: interrupt vector that's being configured
- * @vector: HW vector index to apply the interrupt throttling to
*
* Configure interrupt throttling values for the ring containers that are
* associated with the interrupt vector passed in.
*/
static void
-ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
+ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
+ ice_cfg_itr_gran(hw);
+
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
@@ -1738,8 +1822,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
@@ -1753,8 +1836,7 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
- wr32(hw, GLINT_ITR(rc->itr_idx, vector),
+ wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
}
@@ -1766,17 +1848,17 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
- u16 vector = vsi->hw_base_vector;
struct ice_hw *hw = &pf->hw;
u32 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+ for (i = 0; i < vsi->num_q_vectors; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
+ u16 reg_idx = q_vector->reg_idx;
- ice_cfg_itr(hw, q_vector, vector);
+ ice_cfg_itr(hw, q_vector);
- wr32(hw, GLINT_RATE(vector),
+ wr32(hw, GLINT_RATE(reg_idx),
ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
/* Both Transmit Queue Interrupt Cause Control register
@@ -1791,33 +1873,37 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF.
*/
for (q = 0; q < q_vector->num_ring_tx; q++) {
- int itr_idx = q_vector->tx.itr_idx;
+ int itr_idx = (q_vector->tx.itr_idx <<
+ QINT_TQCTL_ITR_INDX_S) &
+ QINT_TQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_TQCTL_ITR_INDX_S) |
- ((i + 1) << QINT_TQCTL_MSIX_INDX_S);
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ (((i + 1) << QINT_TQCTL_MSIX_INDX_S) &
+ QINT_TQCTL_MSIX_INDX_M);
else
- val = QINT_TQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_TQCTL_ITR_INDX_S) |
- (vector << QINT_TQCTL_MSIX_INDX_S);
+ val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
+ ((reg_idx << QINT_TQCTL_MSIX_INDX_S) &
+ QINT_TQCTL_MSIX_INDX_M);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++;
}
for (q = 0; q < q_vector->num_ring_rx; q++) {
- int itr_idx = q_vector->rx.itr_idx;
+ int itr_idx = (q_vector->rx.itr_idx <<
+ QINT_RQCTL_ITR_INDX_S) &
+ QINT_RQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_RQCTL_ITR_INDX_S) |
- ((i + 1) << QINT_RQCTL_MSIX_INDX_S);
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ (((i + 1) << QINT_RQCTL_MSIX_INDX_S) &
+ QINT_RQCTL_MSIX_INDX_M);
else
- val = QINT_RQCTL_CAUSE_ENA_M |
- (itr_idx << QINT_RQCTL_ITR_INDX_S) |
- (vector << QINT_RQCTL_MSIX_INDX_S);
+ val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
+ ((reg_idx << QINT_RQCTL_MSIX_INDX_S) &
+ QINT_RQCTL_MSIX_INDX_M);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++;
}
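
For reference, the new masked form packs the same fields as before: cause-enable in bit 30, the ITR index in bits 12:11 and the MSI-X index in bits 10:0, per the QINT_TQCTL/QINT_RQCTL masks added in ice_hw_autogen.h. A standalone sketch with arbitrary example indices (reg_idx 5, itr_idx 1 are illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t cause_ena = 1u << 30;	/* QINT_xQCTL_CAUSE_ENA_M */
	uint32_t itr_idx = 1, reg_idx = 5;	/* example values only */
	uint32_t val = cause_ena |
		       ((itr_idx << 11) & (0x3u << 11)) |
		       (reg_idx & 0x7FFu);

	assert(val == 0x40000805);
	return 0;
}
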
@@ -1848,6 +1934,10 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
*/
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
+ /* Preserve existing VLAN strip setting */
+ ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
+ ICE_AQ_VSI_VLAN_EMOD_M);
+
ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
@@ -1937,7 +2027,7 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
* ice_vsi_stop_tx_rings - Disable Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
- * @rel_vmvf_num: Relative id of VF/VM
+ * @rel_vmvf_num: Relative ID of VF/VM
* @rings: Tx ring array to be stopped
* @offset: offset within vsi->txq_map
*/
@@ -1947,10 +2037,10 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ int tc, q_idx = 0, err = 0;
+ u16 *q_ids, *q_handles, i;
enum ice_status status;
u32 *q_teids, val;
- u16 *q_ids, i;
- int err = 0;
if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
return -EINVAL;
@@ -1967,50 +2057,69 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
goto err_alloc_q_ids;
}
- /* set up the Tx queue list to be disabled */
- ice_for_each_txq(vsi, i) {
- u16 v_idx;
+ q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
+ sizeof(*q_handles), GFP_KERNEL);
+ if (!q_handles) {
+ err = -ENOMEM;
+ goto err_alloc_q_handles;
+ }
- if (!rings || !rings[i] || !rings[i]->q_vector) {
- err = -EINVAL;
- goto err_out;
- }
+ /* set up the Tx queue list to be disabled for each enabled TC */
+ ice_for_each_traffic_class(tc) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- q_ids[i] = vsi->txq_map[i + offset];
- q_teids[i] = rings[i]->txq_teid;
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ if (!rings || !rings[q_idx] ||
+ !rings[q_idx]->q_vector) {
+ err = -EINVAL;
+ goto err_out;
+ }
- /* clear cause_ena bit for disabled queues */
- val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
- val &= ~QINT_TQCTL_CAUSE_ENA_M;
- wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
+ q_ids[i] = vsi->txq_map[q_idx + offset];
+ q_teids[i] = rings[q_idx]->txq_teid;
+ q_handles[i] = i;
- /* software is expected to wait for 100 ns */
- ndelay(100);
+ /* clear cause_ena bit for disabled queues */
+			val = rd32(hw, QINT_TQCTL(rings[q_idx]->reg_idx));
+			val &= ~QINT_TQCTL_CAUSE_ENA_M;
+			wr32(hw, QINT_TQCTL(rings[q_idx]->reg_idx), val);
- /* trigger a software interrupt for the vector associated to
- * the queue to schedule NAPI handler
+ /* software is expected to wait for 100 ns */
+ ndelay(100);
+
+ /* trigger a software interrupt for the vector
+ * associated to the queue to schedule NAPI handler
+ */
+ wr32(hw, GLINT_DYN_CTL(rings[i]->q_vector->reg_idx),
+ GLINT_DYN_CTL_SWINT_TRIG_M |
+ GLINT_DYN_CTL_INTENA_MSK_M);
+ q_idx++;
+ }
+ status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
+ vsi->num_txq, q_handles, q_ids,
+ q_teids, rst_src, rel_vmvf_num, NULL);
+
+ /* if the disable queue command was exercised during an active
+ * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
+ * an error as the reset operation disables queues at the
+ * hardware level anyway.
*/
- v_idx = rings[i]->q_vector->v_idx;
- wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
- GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
- }
- status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
- rst_src, rel_vmvf_num, NULL);
- /* if the disable queue command was exercised during an active reset
- * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
- * the reset operation disables queues at the hardware level anyway.
- */
- if (status == ICE_ERR_RESET_ONGOING) {
- dev_info(&pf->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
- } else if (status) {
- dev_err(&pf->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n",
- status);
- err = -ENODEV;
+ if (status == ICE_ERR_RESET_ONGOING) {
+ dev_dbg(&pf->pdev->dev,
+ "Reset in progress. LAN Tx queues already disabled\n");
+ } else if (status) {
+ dev_err(&pf->pdev->dev,
+ "Failed to disable LAN Tx queues, error: %d\n",
+ status);
+ err = -ENODEV;
+ }
}
err_out:
+ devm_kfree(&pf->pdev->dev, q_handles);
+
+err_alloc_q_handles:
devm_kfree(&pf->pdev->dev, q_ids);
err_alloc_q_ids:
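The reworked ice_vsi_stop_tx_rings above walks the enabled traffic classes via a bitmap and disables each TC's queues using per-TC relative handles while keeping a VSI-wide running ring index. A compact sketch of that bitmap-driven iteration is below; the queue counts and TC limit are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_TC 8

int main(void)
{
	/* per-TC Tx queue counts; only TCs set in ena_tc are active */
	const uint16_t qcount_tx[DEMO_MAX_TC] = { 4, 2, 0, 0, 0, 0, 0, 0 };
	uint8_t ena_tc = 0x3;	/* TC0 and TC1 enabled */
	int q_idx = 0;		/* running index into the VSI's ring array */
	int tc, handle;

	for (tc = 0; tc < DEMO_MAX_TC; tc++) {
		if (!(ena_tc & (1 << tc)))
			break;	/* enabled TCs are contiguous from TC0 */

		for (handle = 0; handle < qcount_tx[tc]; handle++) {
			/* handle is relative to the TC, q_idx is VSI-wide */
			printf("TC %d: queue handle %d -> ring %d\n",
			       tc, handle, q_idx);
			q_idx++;
		}
	}
	return 0;
}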
@@ -2023,10 +2132,11 @@ err_alloc_q_ids:
* ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
* @vsi: the VSI being configured
* @rst_src: reset source
- * @rel_vmvf_num: Relative id of VF/VM
+ * @rel_vmvf_num: Relative ID of VF/VM
*/
-int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
- enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
{
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
0);
@@ -2036,19 +2146,22 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
+ * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
*
* returns 0 if VSI is updated, negative otherwise
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
struct device *dev;
+ struct ice_pf *pf;
int status;
if (!vsi)
return -EINVAL;
- dev = &vsi->back->pdev->dev;
+ pf = vsi->back;
+ dev = &pf->pdev->dev;
ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return -ENOMEM;
@@ -2067,14 +2180,16 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ if (!vlan_promisc)
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
- status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- vsi->back->hw.adminq.sq_last_status);
+ pf->hw.adminq.sq_last_status);
goto err_out;
}
@@ -2089,12 +2204,98 @@ err_out:
return -EIO;
}
+static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
+
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+}
+
+/**
+ * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
+ * @vsi: VSI to set the q_vectors register index on
+ */
+static int
+ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
+{
+ u16 i;
+
+ if (!vsi || !vsi->q_vectors)
+ return -EINVAL;
+
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (!q_vector) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set reg_idx on q_vector %d VSI %d\n",
+ i, vsi->vsi_num);
+ goto clear_reg_idx;
+ }
+
+ q_vector->reg_idx = q_vector->v_idx + vsi->hw_base_vector;
+ }
+
+ return 0;
+
+clear_reg_idx:
+ ice_for_each_q_vector(vsi, i) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+ if (q_vector)
+ q_vector->reg_idx = 0;
+ }
+
+ return -EINVAL;
+}
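ice_vsi_set_q_vectors_reg_idx assigns a hardware register index to every q_vector and, if any entry is missing, unwinds by clearing whatever was already set. A small standalone sketch of that assign-or-roll-back shape; the vector count and base offset are arbitrary here.

#include <stdio.h>

#define DEMO_NUM_VECTORS 4

static int set_reg_indexes(int *reg_idx[], int num, int hw_base)
{
	int i;

	for (i = 0; i < num; i++) {
		if (!reg_idx[i])
			goto clear;		/* missing entry: undo everything */
		*reg_idx[i] = hw_base + i;
	}
	return 0;

clear:
	for (i = 0; i < num; i++)
		if (reg_idx[i])
			*reg_idx[i] = 0;	/* leave no half-initialized state */
	return -1;
}

int main(void)
{
	int a = -1, b = -1, c = -1, d = -1;
	int *vectors[DEMO_NUM_VECTORS] = { &a, &b, &c, &d };

	if (!set_reg_indexes(vectors, DEMO_NUM_VECTORS, 10))
		printf("reg_idx: %d %d %d %d\n", a, b, c, d);
	return 0;
}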
+
+/**
+ * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
+ * @vsi: the VSI being configured
+ * @add_rule: boolean value to add or remove ethertype filter rule
+ */
+static void
+ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
+{
+ struct ice_fltr_list_entry *list;
+ struct ice_pf *pf = vsi->back;
+ LIST_HEAD(tmp_add_list);
+ enum ice_status status;
+
+ list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
+ if (!list)
+ return;
+
+ list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
+ list->fltr_info.fltr_act = ICE_DROP_PACKET;
+ list->fltr_info.flag = ICE_FLTR_TX;
+ list->fltr_info.src_id = ICE_SRC_ID_VSI;
+ list->fltr_info.vsi_handle = vsi->idx;
+ list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
+
+ INIT_LIST_HEAD(&list->list_entry);
+ list_add(&list->list_entry, &tmp_add_list);
+
+ if (add_rule)
+ status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
+ else
+ status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
+
+ if (status)
+ dev_err(&pf->pdev->dev,
+ "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+ vsi->vsi_num, status);
+
+ ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
+}
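ice_vsi_add_rem_eth_mac builds a one-entry temporary filter list, hands it to the add or remove helper, and then frees the list regardless of the outcome. A simplified, standalone sketch of that build-submit-free flow; it uses a plain singly linked list and a placeholder apply function instead of the kernel list API and ice_add_eth_mac()/ice_remove_eth_mac().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_fltr {
	uint16_t ethertype;
	struct demo_fltr *next;
};

/* placeholder for the add/remove helpers: just report the rule */
static int demo_apply_rules(const struct demo_fltr *list, int add)
{
	for (; list; list = list->next)
		printf("%s drop rule for ethertype 0x%04x\n",
		       add ? "add" : "remove", list->ethertype);
	return 0;
}

int main(void)
{
	struct demo_fltr *list = calloc(1, sizeof(*list));

	if (!list)
		return 1;

	list->ethertype = 0x8808;	/* ETH_P_PAUSE: flow-control frames */
	list->next = NULL;

	demo_apply_rules(list, 1);	/* add the Tx drop rule */
	demo_apply_rules(list, 0);	/* ...and later remove it again */

	free(list);			/* always release the temporary list */
	return 0;
}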
+
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
* @type: VSI type
- * @vf_id: defines VF id to which this VSI connects. This field is meant to be
+ * @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
*
@@ -2112,7 +2313,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- vsi = ice_vsi_alloc(pf, type);
+ if (type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, type, vf_id);
+ else
+ vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
@@ -2120,6 +2325,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
vsi->port_info = pi;
vsi->vsw = pf->first_sw;
+ if (vsi->type == ICE_VSI_PF)
+ vsi->ethtype = ETH_P_PAUSE;
+
if (vsi->type == ICE_VSI_VF)
vsi->vf_id = vf_id;
@@ -2132,7 +2340,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
- /* set tc configuration */
+ /* set TC configuration */
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
@@ -2150,6 +2358,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
if (ret)
goto unroll_alloc_q_vector;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto unroll_vector_base;
@@ -2188,6 +2400,10 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
} else {
vsi->hw_base_vector = pf->vf[vf_id].first_vector_idx;
}
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
pf->q_left_tx -= vsi->alloc_txq;
pf->q_left_rx -= vsi->alloc_rxq;
break;
@@ -2203,18 +2419,29 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (ret) {
- dev_info(&pf->pdev->dev, "Failed VSI lan queue config\n");
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto unroll_vector_base;
}
+	/* Add a switch rule to drop all Tx Flow Control Frames, with lookup
+	 * type ETHERTYPE, from VSIs, and restrict a malicious VF from sending
+	 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
+ * The rule is added once for PF VSI in order to create appropriate
+ * recipe, since VSI/VSI list is ignored with drop action...
+ */
+ if (vsi->type == ICE_VSI_PF)
+ ice_vsi_add_rem_eth_mac(vsi, true);
+
return vsi;
unroll_vector_base:
/* reclaim SW interrupts back to the common pool */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
/* reclaim HW interrupt back to the common pool */
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
ice_vsi_free_q_vectors(vsi);
@@ -2281,7 +2508,7 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
u16 vector = i + base;
int irq_num;
@@ -2500,12 +2727,12 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
/* disable each interrupt */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
- for (i = vsi->hw_base_vector;
- i < (vsi->num_q_vectors + vsi->hw_base_vector); i++)
- wr32(hw, GLINT_DYN_CTL(i), 0);
+ ice_for_each_q_vector(vsi, i)
+ wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
ice_flush(hw);
- for (i = 0; i < vsi->num_q_vectors; i++)
+
+ ice_for_each_q_vector(vsi, i)
synchronize_irq(pf->msix_entries[i + base].vector);
}
}
@@ -2551,22 +2778,23 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* reclaim interrupt vectors back to PF */
if (vsi->type != ICE_VSI_VF) {
/* reclaim SW interrupts back to the common pool */
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector,
- vsi->idx);
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
pf->num_avail_sw_msix += vsi->num_q_vectors;
/* reclaim HW interrupts back to the common pool */
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector,
- vsi->idx);
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
pf->num_avail_hw_msix += vsi->num_q_vectors;
} else if (test_bit(ICE_VF_STATE_CFG_INTR, vf->vf_states)) {
/* Reclaim VF resources back only while freeing all VFs or
* vector reassignment is requested
*/
- ice_free_res(vsi->back->hw_irq_tracker, vf->first_vector_idx,
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
vsi->idx);
pf->num_avail_hw_msix += pf->num_vf_msix;
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_vsi_add_rem_eth_mac(vsi, false);
+
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
@@ -2596,6 +2824,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vf *vf = NULL;
struct ice_pf *pf;
int ret, i;
@@ -2603,16 +2832,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
+ if (vsi->type == ICE_VSI_VF)
+ vf = &pf->vf[vsi->vf_id];
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- vsi->sw_base_vector = 0;
+
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->sw_base_vector = 0;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else {
+		/* Reclaim VF resources back to the common pool for reset
+		 * and rebuild, with vector reassignment
+ */
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
vsi->hw_base_vector = 0;
+
ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi, false);
- ice_dev_onetime_setup(&vsi->back->hw);
- ice_vsi_set_num_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+ ice_dev_onetime_setup(&pf->hw);
+ if (vsi->type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf->vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
@@ -2620,7 +2871,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret < 0)
goto err_vsi;
- ret = ice_vsi_alloc_arrays(vsi, false);
+ ret = ice_vsi_alloc_arrays(vsi);
if (ret < 0)
goto err_vsi;
@@ -2634,6 +2885,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
@@ -2643,7 +2898,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
* receive traffic on first queue. Hence no need to capture
* return value
*/
- if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags))
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
break;
case ICE_VSI_VF:
@@ -2655,12 +2910,16 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
if (ret)
goto err_vectors;
+ ret = ice_vsi_set_q_vectors_reg_idx(vsi);
+ if (ret)
+ goto err_vectors;
+
ret = ice_vsi_alloc_rings(vsi);
if (ret)
goto err_vectors;
- vsi->back->q_left_tx -= vsi->alloc_txq;
- vsi->back->q_left_rx -= vsi->alloc_rxq;
+ pf->q_left_tx -= vsi->alloc_txq;
+ pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
break;
@@ -2673,8 +2932,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Failed VSI lan queue config\n");
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
goto err_vectors;
}
return 0;
@@ -2690,7 +2950,7 @@ err_rings:
}
err_vsi:
ice_vsi_clear(vsi);
- set_bit(__ICE_RESET_FAILED, vsi->back->state);
+ set_bit(__ICE_RESET_FAILED, pf->state);
return ret;
}
@@ -2705,3 +2965,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
}
+
+#ifdef CONFIG_DCB
+/**
+ * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
+ * @vsi: VSI being configured
+ * @ctx: the context buffer returned from AQ VSI update command
+ */
+static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
+{
+ vsi->info.mapping_flags = ctx->info.mapping_flags;
+ memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+}
+
+/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
+ * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
+ * @vsi: VSI to be configured
+ * @ena_tc: TC bitmap
+ *
+ * VSI queues expected to be quiesced before calling this function
+ */
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_ctx *ctx;
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ int i, ret = 0;
+ u8 num_tc = 0;
+
+ ice_for_each_traffic_class(i) {
+ /* build bitmap of enabled TCs */
+ if (ena_tc & BIT(i))
+ num_tc++;
+ /* populate max_txqs per TC */
+ max_txqs[i] = pf->num_lan_tx;
+ }
+
+ vsi->tc_cfg.ena_tc = ena_tc;
+ vsi->tc_cfg.numtc = num_tc;
+
+ ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->vf_num = 0;
+ ctx->info = vsi->info;
+
+ ice_vsi_setup_q_map(vsi, ctx);
+
+	/* must indicate which sections of the VSI context are being modified */
+ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
+ if (status) {
+ dev_info(&pf->pdev->dev, "Failed VSI Update\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "VSI %d failed TC config, error %d\n",
+ vsi->vsi_num, status);
+ ret = -EIO;
+ goto out;
+ }
+ ice_vsi_update_q_map(vsi, ctx);
+ vsi->info.valid_sections = 0;
+
+ ice_vsi_cfg_netdev_tc(vsi, ena_tc);
+out:
+ devm_kfree(&pf->pdev->dev, ctx);
+ return ret;
+}
+#endif /* CONFIG_DCB */
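ice_vsi_cfg_tc derives the number of enabled TCs and the per-TC Tx queue ceilings from the ena_tc bitmap before updating the VSI context and the scheduler. The arithmetic reduces to the following sketch; the queue limit constant stands in for pf->num_lan_tx and is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_TC		8
#define DEMO_NUM_LAN_TX		64	/* stand-in for pf->num_lan_tx */

int main(void)
{
	uint16_t max_txqs[DEMO_MAX_TC] = { 0 };
	uint8_t ena_tc = 0x0b;		/* TCs 0, 1 and 3 requested */
	uint8_t num_tc = 0;
	int i;

	for (i = 0; i < DEMO_MAX_TC; i++) {
		if (ena_tc & (1 << i))
			num_tc++;		/* count enabled TCs */
		max_txqs[i] = DEMO_NUM_LAN_TX;	/* per-TC Tx queue ceiling */
	}

	printf("numtc = %d\n", num_tc);
	for (i = 0; i < DEMO_MAX_TC; i++)
		printf("max_txqs[%d] = %d\n", i, max_txqs[i]);
	return 0;
}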
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 7988a53729a9..a91d3553cc89 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -35,12 +35,16 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
+#endif /* CONFIG_DCB */
+
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
void ice_vsi_put_qs(struct ice_vsi *vsi);
+#ifdef CONFIG_DCB
+void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
+#endif /* CONFIG_DCB */
+
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
@@ -70,8 +78,7 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
-int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 47cc3f905b7f..7843abf4d44d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -7,8 +7,9 @@
#include "ice.h"
#include "ice_lib.h"
+#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.2-k"
+#define DRV_VERSION "0.7.4-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -30,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;
-static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
@@ -113,14 +113,14 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
}
/**
- * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
+ * ice_add_mac_to_sync_list - creates list of MAC addresses to be synced
* @netdev: the net device on which the sync is happening
- * @addr: mac address to sync
+ * @addr: MAC address to sync
*
* This is a callback function which is called by the in kernel device sync
* functions (like __dev_uc_sync, __dev_mc_sync, etc). This function only
* populates the tmp_sync_list, which is later used by ice_add_mac to add the
- * mac filters from the hardware.
+ * MAC filters from the hardware.
*/
static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
{
@@ -134,14 +134,14 @@ static int ice_add_mac_to_sync_list(struct net_device *netdev, const u8 *addr)
}
/**
- * ice_add_mac_to_unsync_list - creates list of mac addresses to be unsynced
+ * ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced
* @netdev: the net device on which the unsync is happening
- * @addr: mac address to unsync
+ * @addr: MAC address to unsync
*
* This is a callback function which is called by the in kernel device unsync
* functions (like __dev_uc_unsync, __dev_mc_unsync, etc). This function only
* populates the tmp_unsync_list, which is later used by ice_remove_mac to
- * delete the mac filters from the hardware.
+ * delete the MAC filters from the hardware.
*/
static int ice_add_mac_to_unsync_list(struct net_device *netdev, const u8 *addr)
{
@@ -168,6 +168,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_promisc - Enable or disable promiscuous mode for a given PF
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @set_promisc: enable or disable promisc flag request
+ *
+ */
+static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ enum ice_status status = 0;
+
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->vlan_ena) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ set_promisc);
+ } else {
+ if (set_promisc)
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
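ice_cfg_promisc picks between the VLAN-aware and plain promiscuous helpers based on whether any VLAN filter is active, and then either sets or clears the requested mode. Reduced to a decision table in a standalone sketch; the helpers below are placeholders, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

/* placeholders standing in for ice_set_vlan_vsi_promisc() and friends */
static int demo_set_vlan_promisc(bool set)  { printf("vlan promisc %s\n", set ? "on" : "off"); return 0; }
static int demo_set_promisc(void)           { printf("promisc on\n");  return 0; }
static int demo_clear_promisc(void)         { printf("promisc off\n"); return 0; }

static int demo_cfg_promisc(bool vlan_ena, bool set_promisc)
{
	if (vlan_ena)
		return demo_set_vlan_promisc(set_promisc);	/* one helper handles both set and clear */

	return set_promisc ? demo_set_promisc() : demo_clear_promisc();
}

int main(void)
{
	demo_cfg_promisc(true, true);	/* VLAN filters active: VLAN-aware promisc */
	demo_cfg_promisc(false, false);	/* no VLANs: plain promisc clear */
	return 0;
}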
+
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
struct ice_hw *hw = &pf->hw;
enum ice_status status = 0;
u32 changed_flags = 0;
+ u8 promisc_m;
int err = 0;
if (!vsi->netdev)
@@ -211,7 +245,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
netif_addr_unlock_bh(netdev);
}
- /* Remove mac addresses in the unsync list */
+ /* Remove MAC addresses in the unsync list */
status = ice_remove_mac(hw, &vsi->tmp_unsync_list);
ice_free_fltr_list(dev, &vsi->tmp_unsync_list);
if (status) {
@@ -223,12 +257,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
- /* Add mac addresses in the sync list */
+ /* Add MAC addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
- if (status) {
+	/* If the filter was added successfully or already exists, do not
+	 * report it as an error. Instead continue processing the rest of
+	 * the function.
+	 */
+ if (status && status != ICE_ERR_ALREADY_EXISTS) {
netdev_err(netdev, "Failed to add MAC filters\n");
- /* If there is no more space for new umac filters, vsi
+ /* If there is no more space for new umac filters, VSI
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
@@ -245,49 +283,56 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
/* check for changes in promiscuous modes */
- if (changed_flags & IFF_ALLMULTI)
- netdev_warn(netdev, "Unsupported configuration\n");
+ if (changed_flags & IFF_ALLMULTI) {
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, true);
+ if (err) {
+ netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags &= ~IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, false);
+ if (err) {
+ netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ }
+ }
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
- /* Apply TX filter rule to get traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
- ICE_FLTR_TX);
- if (status) {
- netdev_err(netdev, "Error setting default VSI %i tx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags &= ~IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
- }
- /* Apply RX filter rule to get traffic from wire */
+ /* Apply Rx filter rule to get traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
- netdev_err(netdev, "Error setting default VSI %i rx rule\n",
+ netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_PROMISC;
err = -EIO;
goto out_promisc;
}
} else {
- /* Clear TX filter rule to stop traffic from VMs */
- status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
- ICE_FLTR_TX);
- if (status) {
- netdev_err(netdev, "Error clearing default VSI %i tx rule\n",
- vsi->vsi_num);
- vsi->current_netdev_flags |= IFF_PROMISC;
- err = -EIO;
- goto out_promisc;
- }
- /* Clear RX filter to remove traffic from wire */
+ /* Clear Rx filter to remove traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- netdev_err(netdev, "Error clearing default VSI %i rx rule\n",
+ netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_PROMISC;
err = -EIO;
@@ -322,7 +367,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
- for (v = 0; v < pf->num_alloc_vsi; v++)
+ ice_for_each_vsi(pf, v)
if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
ice_vsi_sync_fltr(pf->vsi[v])) {
/* come back and try again later */
@@ -332,6 +377,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
/**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+ if (test_bit(__ICE_DOWN, vsi->state))
+ return;
+
+ set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+ if (netif_running(vsi->netdev)) {
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
+ } else {
+ ice_vsi_close(vsi);
+ }
+ }
+}
+
+/**
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ * @locked: is the rtnl_lock already held
+ */
+#ifdef CONFIG_DCB
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
+{
+ int v;
+
+ ice_for_each_vsi(pf, v)
+ if (pf->vsi[v])
+ ice_dis_vsi(pf->vsi[v], locked);
+}
+
+/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
*
@@ -342,12 +432,16 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ /* already prepared for reset */
+ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ return;
+
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
- ice_pf_dis_all_vsi(pf);
+ ice_pf_dis_all_vsi(pf, false);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -394,6 +488,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
}
@@ -416,10 +511,15 @@ static void ice_reset_subtask(struct ice_pf *pf)
* for the reset now), poll for reset done, rebuild and return.
*/
if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
- clear_bit(__ICE_GLOBR_RECV, pf->state);
- clear_bit(__ICE_CORER_RECV, pf->state);
- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
- ice_prepare_for_reset(pf);
+ /* Perform the largest reset requested */
+ if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+ reset_type = ICE_RESET_CORER;
+ if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ /* return if no valid reset type requested */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -429,13 +529,14 @@ static void ice_reset_subtask(struct ice_pf *pf)
pf->hw.reset_ongoing = false;
ice_rebuild(pf);
/* clear bit to resume normal operations, but
- * ICE_NEEDS_RESTART bit is set incase rebuild failed
+ * ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
clear_bit(__ICE_RESET_OICR_RECV, pf->state);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
clear_bit(__ICE_CORER_REQ, pf->state);
clear_bit(__ICE_GLOBR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
return;
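The reset subtask above now consumes the received-reset bits in severity order, so a pending global reset wins over a core reset, and it bails out if no valid type was recorded. A minimal sketch of that selection; plain flags are used here, whereas the driver uses atomic test_and_clear_bit() on pf->state.

#include <stdbool.h>
#include <stdio.h>

enum demo_reset { DEMO_RESET_INVAL, DEMO_RESET_CORER, DEMO_RESET_GLOBR };

/* non-atomic stand-in for test_and_clear_bit() */
static bool test_and_clear_flag(bool *flag)
{
	bool was_set = *flag;

	*flag = false;
	return was_set;
}

int main(void)
{
	bool corer_recv = true, globr_recv = true;
	enum demo_reset reset_type = DEMO_RESET_INVAL;

	/* pick the largest reset requested: GLOBR overrides CORER */
	if (test_and_clear_flag(&corer_recv))
		reset_type = DEMO_RESET_CORER;
	if (test_and_clear_flag(&globr_recv))
		reset_type = DEMO_RESET_GLOBR;

	if (reset_type == DEMO_RESET_INVAL) {
		puts("no valid reset type requested");
		return 0;
	}

	printf("performing reset type %d\n", reset_type);
	return 0;
}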
@@ -469,6 +570,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
const char *speed;
const char *fc;
+ if (!vsi)
+ return;
+
if (vsi->current_isup == isup)
return;
@@ -519,6 +623,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
case ICE_FC_RX_PAUSE:
fc = "RX";
break;
+ case ICE_FC_NONE:
+ fc = "None";
+ break;
default:
fc = "Unknown";
break;
@@ -529,21 +636,22 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
/**
- * ice_vsi_link_event - update the vsi's netdev
- * @vsi: the vsi on which the link event occurred
- * @link_up: whether or not the vsi needs to be set up or down
+ * ice_vsi_link_event - update the VSI's netdev
+ * @vsi: the VSI on which the link event occurred
+ * @link_up: whether or not the VSI needs to be set up or down
*/
static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
{
- if (!vsi || test_bit(__ICE_DOWN, vsi->state))
+ if (!vsi)
+ return;
+
+ if (test_bit(__ICE_DOWN, vsi->state) || !vsi->netdev)
return;
if (vsi->type == ICE_VSI_PF) {
- if (!vsi->netdev) {
- dev_dbg(&vsi->back->pdev->dev,
- "vsi->netdev is not initialized!\n");
+ if (link_up == netif_carrier_ok(vsi->netdev))
return;
- }
+
if (link_up) {
netif_carrier_on(vsi->netdev);
netif_tx_wake_all_queues(vsi->netdev);
@@ -558,61 +666,51 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
* ice_link_event - process the link event
* @pf: pf that the link event is associated with
* @pi: port_info for the port that the link event is associated with
+ * @link_up: true if the physical link is up and false if it is down
+ * @link_speed: current link speed received from the link event
*
- * Returns -EIO if ice_get_link_status() fails
- * Returns 0 on success
+ * Returns 0 on success and negative on failure
*/
static int
-ice_link_event(struct ice_pf *pf, struct ice_port_info *pi)
+ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
+ u16 link_speed)
{
- u8 new_link_speed, old_link_speed;
struct ice_phy_info *phy_info;
- bool new_link_same_as_old;
- bool new_link, old_link;
- u8 lport;
- u16 v;
+ struct ice_vsi *vsi;
+ u16 old_link_speed;
+ bool old_link;
+ int result;
phy_info = &pi->phy;
phy_info->link_info_old = phy_info->link_info;
- /* Force ice_get_link_status() to update link info */
- phy_info->get_link_info = true;
- old_link = (phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
+ old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
old_link_speed = phy_info->link_info_old.link_speed;
- lport = pi->lport;
- if (ice_get_link_status(pi, &new_link)) {
+ /* update the link info structures and re-enable link events,
+ * don't bail on failure due to other book keeping needed
+ */
+ result = ice_update_link_info(pi);
+ if (result)
dev_dbg(&pf->pdev->dev,
- "Could not get link status for port %d\n", lport);
- return -EIO;
- }
-
- new_link_speed = phy_info->link_info.link_speed;
+ "Failed to update link status and re-enable link events for port %d\n",
+ pi->lport);
- new_link_same_as_old = (new_link == old_link &&
- new_link_speed == old_link_speed);
-
- ice_for_each_vsi(pf, v) {
- struct ice_vsi *vsi = pf->vsi[v];
-
- if (!vsi || !vsi->port_info)
- continue;
+ /* if the old link up/down and speed is the same as the new */
+ if (link_up == old_link && link_speed == old_link_speed)
+ return result;
- if (new_link_same_as_old &&
- (test_bit(__ICE_DOWN, vsi->state) ||
- new_link == netif_carrier_ok(vsi->netdev)))
- continue;
+ vsi = ice_find_vsi_by_type(pf, ICE_VSI_PF);
+ if (!vsi || !vsi->port_info)
+ return -EINVAL;
- if (vsi->port_info->lport == lport) {
- ice_print_link_msg(vsi, new_link);
- ice_vsi_link_event(vsi, new_link);
- }
- }
+ ice_vsi_link_event(vsi, link_up);
+ ice_print_link_msg(vsi, link_up);
- if (!new_link_same_as_old && pf->num_alloc_vfs)
+ if (pf->num_alloc_vfs)
ice_vc_notify_link_state(pf);
- return 0;
+ return result;
}
/**
@@ -635,19 +733,73 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
pf->serv_tmr_prev = jiffies;
- if (ice_link_event(pf, pf->hw.port_info))
- dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
-
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
+ * ice_init_link_events - enable/initialize link events
+ * @pi: pointer to the port_info instance
+ *
+ * Returns -EIO on failure, 0 on success
+ */
+static int ice_init_link_events(struct ice_port_info *pi)
+{
+ u16 mask;
+
+ mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+
+ if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to set link event mask for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to enable link events for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ return 0;
+}
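ice_init_link_events builds the admin-queue event mask by OR-ing together the events it cares about (link up/down, media not available, module qualification failure) and inverting the result, because in this mask a set bit suppresses the event. The bit values in the sketch below are invented; only the inversion pattern matches the driver.

#include <stdint.h>
#include <stdio.h>

/* illustrative event bits only -- not the real ICE_AQ_LINK_EVENT_* values */
#define DEMO_EVENT_UPDOWN		0x0001
#define DEMO_EVENT_MEDIA_NA		0x0002
#define DEMO_EVENT_MODULE_QUAL_FAIL	0x0004

int main(void)
{
	/* a set bit masks (suppresses) the event, so invert the wanted set */
	uint16_t mask = ~((uint16_t)(DEMO_EVENT_UPDOWN |
				     DEMO_EVENT_MEDIA_NA |
				     DEMO_EVENT_MODULE_QUAL_FAIL));

	printf("event mask = 0x%04x\n", mask);	/* everything except the three events */
	return 0;
}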
+
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: pf that the link event is associated with
+ * @event: event structure containing link status info
+ */
+static int
+ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ struct ice_aqc_get_link_status_data *link_data;
+ struct ice_port_info *port_info;
+ int status;
+
+ link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return -EINVAL;
+
+ status = ice_link_event(pf, port_info,
+ !!(link_data->link_info & ICE_AQ_LINK_UP),
+ le16_to_cpu(link_data->link_speed));
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Could not process link event, error %d\n", status);
+
+ return status;
+}
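ice_handle_link_event pulls the link-up flag and the little-endian link speed out of the ARQ message buffer before handing them to ice_link_event. A standalone sketch of that decode; the buffer layout here is simplified and does not mirror struct ice_aqc_get_link_status_data.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_LINK_UP	0x01	/* stand-in for ICE_AQ_LINK_UP */

/* convert a little-endian 16-bit field from a byte buffer to host order */
static uint16_t demo_le16_to_cpu(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* example payload: a flags byte, then a speed field stored little-endian */
	const uint8_t msg_buf[] = { DEMO_LINK_UP, 0x40, 0x00 };
	bool link_up = !!(msg_buf[0] & DEMO_LINK_UP);
	uint16_t link_speed = demo_le16_to_cpu(&msg_buf[1]);

	printf("link_up=%d speed_field=0x%04x\n", link_up, link_speed);
	return 0;
}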
+
+/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
* @q_type: specific Control queue type
@@ -750,12 +902,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ if (ice_handle_link_event(pf, &event))
+ dev_err(&pf->pdev->dev,
+ "Could not handle link event\n");
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
case ice_aqc_opc_fw_logging:
ice_output_fw_log(hw, &event.desc, event.msg_buf);
break;
+ case ice_aqc_opc_lldp_set_mib_change:
+ ice_dcb_process_lldp_set_mib_change(pf, &event);
+ break;
default:
dev_dbg(&pf->pdev->dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
@@ -877,6 +1037,18 @@ static void ice_service_task_stop(struct ice_pf *pf)
}
/**
+ * ice_service_task_restart - restart service task and schedule works
+ * @pf: board private structure
+ *
+ * This function is needed for suspend and resume work (e.g. WoL scenario)
+ */
+static void ice_service_task_restart(struct ice_pf *pf)
+{
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -901,7 +1073,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u32 reg;
int i;
- if (!test_bit(__ICE_MDD_EVENT_PENDING, pf->state))
+ if (!test_and_clear_bit(__ICE_MDD_EVENT_PENDING, pf->state))
return;
/* find what triggered the MDD event */
@@ -993,10 +1165,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
struct ice_vf *vf = &pf->vf[i];
+ mdd_detected = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(i));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1004,7 +1178,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TCLAN(i));
if (reg & VP_MDET_TX_TCLAN_VALID_M) {
wr32(hw, VP_MDET_TX_TCLAN(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1012,7 +1186,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_TX_TDPU(i));
if (reg & VP_MDET_TX_TDPU_VALID_M) {
wr32(hw, VP_MDET_TX_TDPU(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
i);
}
@@ -1020,26 +1194,19 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
reg = rd32(hw, VP_MDET_RX(i));
if (reg & VP_MDET_RX_VALID_M) {
wr32(hw, VP_MDET_RX(i), 0xFFFF);
- vf->num_mdd_events++;
+ mdd_detected = true;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
i);
}
- if (vf->num_mdd_events > ICE_DFLT_NUM_MDD_EVENTS_ALLOWED) {
- dev_info(&pf->pdev->dev,
- "Too many MDD events on VF %d, disabled\n", i);
+ if (mdd_detected) {
+ vf->num_mdd_events++;
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
}
}
- /* re-enable MDD interrupt cause */
- clear_bit(__ICE_MDD_EVENT_PENDING, pf->state);
- reg = rd32(hw, PFINT_OICR_ENA);
- reg |= PFINT_OICR_MAL_DETECT_M;
- wr32(hw, PFINT_OICR_ENA, reg);
- ice_flush(hw);
}
/**
@@ -1089,7 +1256,7 @@ static void ice_service_task(struct work_struct *work)
/**
* ice_set_ctrlq_len - helper function to set controlq length
- * @hw: pointer to the hw instance
+ * @hw: pointer to the HW instance
*/
static void ice_set_ctrlq_len(struct ice_hw *hw)
{
@@ -1111,8 +1278,9 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
*/
-static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
+static void
+ice_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
{
struct ice_q_vector *q_vector =
container_of(notify, struct ice_q_vector, affinity_notify);
@@ -1142,7 +1310,7 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
int i;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
}
@@ -1184,10 +1352,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[base + vector].vector,
- vsi->irq_handler, 0, q_vector->name,
- q_vector);
+ err = devm_request_irq(&pf->pdev->dev, irq_num,
+ vsi->irq_handler, 0,
+ q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
"MSIX request_irq failed, error: %d\n", err);
@@ -1328,7 +1495,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
rd32(hw, PFHMC_ERRORDATA));
}
- /* Report and mask off any remaining unexpected interrupts */
+ /* Report any remaining unexpected interrupts */
oicr &= ena_mask;
if (oicr) {
dev_dbg(&pf->pdev->dev, "unhandled interrupt oicr=0x%08x\n",
@@ -1342,12 +1509,9 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
set_bit(__ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
}
- ena_mask &= ~oicr;
}
ret = IRQ_HANDLED;
- /* re-enable interrupt causes that are not handled during this pass */
- wr32(hw, PFINT_OICR_ENA, ena_mask);
if (!test_bit(__ICE_DOWN, pf->state)) {
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -1406,23 +1570,23 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
/**
* ice_ena_ctrlq_interrupts - enable control queue interrupts
* @hw: pointer to HW structure
- * @v_idx: HW vector index to associate the control queue interrupts with
+ * @reg_idx: HW vector index to associate the control queue interrupts with
*/
-static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 v_idx)
+static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
{
u32 val;
- val = ((v_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
PFINT_OICR_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_OICR_CTL, val);
/* enable Admin queue Interrupt causes */
- val = ((v_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_FW_CTL_MSIX_INDX_M) |
PFINT_FW_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_FW_CTL, val);
/* enable Mailbox queue Interrupt causes */
- val = ((v_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
+ val = ((reg_idx & PFINT_MBX_CTL_MSIX_INDX_M) |
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
@@ -1510,7 +1674,7 @@ void ice_napi_del(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
@@ -1529,7 +1693,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+ ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
ice_napi_poll, NAPI_POLL_WEIGHT);
}
@@ -1649,18 +1813,20 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
}
/**
- * ice_vlan_rx_add_vid - Add a vlan id filter to HW offload
+ * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
- * @vid: vlan id to be added
+ * @vid: VLAN ID to be added
*
- * net_device_ops implementation for adding vlan ids
+ * net_device_ops implementation for adding VLAN IDs
*/
-static int ice_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1673,33 +1839,39 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- int ret = ice_cfg_vlan_pruning(vsi, true);
-
+ ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
- /* Add all VLAN ids including 0 to the switch filter. VLAN id 0 is
+ /* Add all VLAN IDs including 0 to the switch filter. VLAN ID 0 is
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- return ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid);
+ if (!ret) {
+ vsi->vlan_ena = true;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ }
+
+ return ret;
}
/**
- * ice_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
+ * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
* @netdev: network interface to be adjusted
* @proto: unused protocol
- * @vid: vlan id to be removed
+ * @vid: VLAN ID to be removed
*
- * net_device_ops implementation for removing vlan ids
+ * net_device_ops implementation for removing VLAN IDs
*/
-static int ice_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int status;
+ int ret;
if (vsi->info.pvid)
return -EINVAL;
@@ -1707,15 +1879,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
- status = ice_vsi_kill_vlan(vsi, vid);
- if (status)
- return status;
+ ret = ice_vsi_kill_vlan(vsi, vid);
+ if (ret)
+ return ret;
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
- status = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_cfg_vlan_pruning(vsi, false, false);
- return status;
+ vsi->vlan_ena = false;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ return ret;
}
/**
@@ -2033,23 +2207,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
- * ice_verify_itr_gran - verify driver's assumption of ITR granularity
- * @pf: pointer to the PF structure
- *
- * There is no error returned here because the driver will be able to handle a
- * different ITR granularity, but interrupt moderation will not be accurate if
- * the driver's assumptions are not verified. This assumption is made so we can
- * use constants in the hot path instead of accessing structure members.
- */
-static void ice_verify_itr_gran(struct ice_pf *pf)
-{
- if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
- dev_warn(&pf->pdev->dev,
- "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
- (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
-}
-
-/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2072,9 +2229,10 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
*
* Returns 0 on success, negative on failure
*/
-static int ice_probe(struct pci_dev *pdev,
- const struct pci_device_id __always_unused *ent)
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
+ struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -2086,20 +2244,20 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
- dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
return err;
}
- pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
+ pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
@@ -2133,17 +2291,26 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_hw(hw);
if (err) {
- dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
- dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
+ dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
ice_init_pf(pf);
+ err = ice_init_pf_dcb(pf);
+ if (err) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+
+ /* do not fail overall init if DCB init fails */
+ err = 0;
+ }
+
ice_determine_q_usage(pf);
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
@@ -2152,8 +2319,8 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_pf_unroll;
}
- pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi), GFP_KERNEL);
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
@@ -2161,8 +2328,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev,
- "ice_init_interrupt_scheme failed: %d\n", err);
+ dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
@@ -2178,15 +2344,13 @@ static int ice_probe(struct pci_dev *pdev,
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
- dev_err(&pdev->dev,
- "setup of misc vector failed: %d\n", err);
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
- GFP_KERNEL);
+ pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
@@ -2204,8 +2368,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(&pdev->dev,
- "probe failed due to setup pf switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2214,8 +2377,13 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ err = ice_init_link_events(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_link_events failed: %d\n", err);
+ goto err_alloc_sw_unroll;
+ }
+
ice_verify_cacheline_size(pf);
- ice_verify_itr_gran(pf);
return 0;
@@ -2227,7 +2395,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
- devm_kfree(&pdev->dev, pf->vsi);
+ devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
@@ -2272,6 +2440,136 @@ static void ice_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
}
+/**
+ * ice_pci_err_detected - warning that PCI error has been detected
+ * @pdev: PCI device information struct
+ * @err: the type of PCI error
+ *
+ * Called to warn that something happened on the PCI bus and the error handling
+ * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
+ */
+static pci_ers_result_t
+ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
+ __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ice_pci_err_slot_reset - a PCI slot reset has just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to determine if the driver can recover from the PCI slot reset by
+ * using a register read to determine if the device is recoverable.
+ */
+static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset, error %d\n",
+ err);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ /* Check for life */
+ reg = rd32(&pf->hw, GLGEN_RTRIG);
+ if (!reg)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err)
+ dev_dbg(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ err);
+ /* non-fatal, continue */
+
+ return result;
+}
+
+/**
+ * ice_pci_err_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error and/or
+ * reset recovery have finished
+ */
+static void ice_pci_err_resume(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev,
+ "%s failed, device is unrecoverable\n", __func__);
+ return;
+ }
+
+ if (test_bit(__ICE_SUSPENDED, pf->state)) {
+ dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
+ __func__);
+ return;
+ }
+
+ ice_do_reset(pf, ICE_RESET_PFR);
+ ice_service_task_restart(pf);
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+}
+
+/**
+ * ice_pci_err_reset_prepare - prepare device driver for PCI reset
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+}
+
+/**
+ * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_done(struct pci_dev *pdev)
+{
+ ice_pci_err_resume(pdev);
+}
+
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
@@ -2289,12 +2587,21 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static const struct pci_error_handlers ice_pci_err_handler = {
+ .error_detected = ice_pci_err_detected,
+ .slot_reset = ice_pci_err_slot_reset,
+ .reset_prepare = ice_pci_err_reset_prepare,
+ .reset_done = ice_pci_err_reset_done,
+ .resume = ice_pci_err_resume
+};
+
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
.sriov_configure = ice_sriov_configure,
+ .err_handler = &ice_pci_err_handler
};
/**
@@ -2341,7 +2648,7 @@ static void __exit ice_module_exit(void)
module_exit(ice_module_exit);
/**
- * ice_set_mac_address - NDO callback to set mac address
+ * ice_set_mac_address - NDO callback to set MAC address
* @netdev: network interface device structure
* @pi: pointer to an address structure
*
@@ -2378,14 +2685,14 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
- /* When we change the mac address we also have to change the mac address
- * based filter rules that were created previously for the old mac
+ /* When we change the MAC address we also have to change the MAC address
+ * based filter rules that were created previously for the old MAC
* address. So first, we remove the old filter rule using ice_remove_mac
* and then create a new filter rule using ice_add_mac. Note that for
- * both these operations, we first need to form a "list" of mac
- * addresses (even though in this case, we have only 1 mac address to be
+ * both these operations, we first need to form a "list" of MAC
+ * addresses (even though in this case, we have only 1 MAC address to be
 * added/removed) and this is done using ice_add_mac_to_list. Depending on
- * the ensuing operation this "list" of mac addresses is either to be
+ * the ensuing operation this "list" of MAC addresses is either to be
* added or removed from the filter.
*/
err = ice_add_mac_to_list(vsi, &r_mac_list, netdev->dev_addr);
@@ -2423,12 +2730,12 @@ free_lists:
return err;
}
- /* change the netdev's mac address */
+ /* change the netdev's MAC address */
memcpy(netdev->dev_addr, mac, netdev->addr_len);
netdev_dbg(vsi->netdev, "updated mac address to %pM\n",
netdev->dev_addr);
- /* write new mac address to the firmware */
+ /* write new MAC address to the firmware */
flags = ICE_AQC_MAN_MAC_UPDATE_LAA_WOL;
status = ice_aq_manage_mac_write(hw, mac, flags, NULL);
if (status) {
@@ -2470,7 +2777,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
- * @vid: VLAN id
+ * @vid: VLAN ID
* @flags: instructions from stack about fdb operation
* @extack: netlink extended ack
*/
@@ -2510,11 +2817,12 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @tb: pointer to array of nladdr (unused)
* @dev: the net device pointer
* @addr: the MAC address entry being added
- * @vid: VLAN id
+ * @vid: VLAN ID
*/
-static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+static int
+ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ __always_unused u16 vid)
{
int err;
@@ -2538,13 +2846,16 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*/
-static int ice_set_features(struct net_device *netdev,
- netdev_features_t features)
+static int
+ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ /* Multiple features can be changed in one call so keep features in
+ * separate if/else statements to guarantee each feature is checked
+ */
if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
ret = ice_vsi_manage_rss_lut(vsi, true);
else if (!(features & NETIF_F_RXHASH) &&
@@ -2557,8 +2868,9 @@ static int ice_set_features(struct net_device *netdev,
else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
ret = ice_vsi_manage_vlan_stripping(vsi, false);
- else if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
- !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_TX) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
ret = ice_vsi_manage_vlan_insertion(vsi);
else if (!(features & NETIF_F_HW_VLAN_CTAG_TX) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_TX))
@@ -2568,8 +2880,8 @@ static int ice_set_features(struct net_device *netdev,
}
/**
- * ice_vsi_vlan_setup - Setup vlan offload properties on a VSI
- * @vsi: VSI to setup vlan properties for
+ * ice_vsi_vlan_setup - Setup VLAN offload properties on a VSI
+ * @vsi: VSI to setup VLAN properties for
*/
static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
{
@@ -2601,6 +2913,7 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
+ ice_vsi_cfg_dcb_rings(vsi);
err = ice_vsi_cfg_lan_txqs(vsi);
if (!err)
@@ -2620,7 +2933,7 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -2666,7 +2979,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_service_task_schedule(pf);
- return err;
+ return 0;
}
/**
@@ -2693,8 +3006,8 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that needs to be performed to read u64 values in 32 bit machine.
*/
-static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
- u64 *bytes)
+static void
+ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
unsigned int start;
*pkts = 0;
@@ -2911,6 +3224,8 @@ static void ice_update_pf_stats(struct ice_pf *pf)
ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
&prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
+ ice_update_dcb_stats(pf);
+
ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
&prev_ps->crc_errors, &cur_ps->crc_errors);
@@ -2992,7 +3307,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
if (q_vector->rx.ring || q_vector->tx.ring)
@@ -3276,7 +3591,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi)
return;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
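The open-coded index loops being replaced in these hunks suggest iterator macros of roughly the following shape (a sketch inferred from the bounds of the loops they replace; the real definitions live in the ice headers, which are not shown in this diff):

/* assumed expansions, matching the loops removed above and below */
#define ice_for_each_vsi(pf, i) \
	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

#define ice_for_each_q_vector(vsi, i) \
	for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_traffic_class(i) \
	for ((i) = 0; (i) < ICE_MAX_TRAFFIC_CLASS; (i)++)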
@@ -3289,47 +3604,31 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
/**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
+ * ice_ena_vsi - resume a VSI
+ * @vsi: the VSI being resumed
* @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
- if (test_bit(__ICE_DOWN, vsi->state))
- return;
+ int err = 0;
- set_bit(__ICE_NEEDS_RESTART, vsi->state);
+ if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+ return err;
+
+ clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ struct net_device *netd = vsi->netdev;
- if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- if (!locked) {
+ if (locked) {
+ err = netd->netdev_ops->ndo_open(netd);
+ } else {
rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
- } else {
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
- ice_vsi_close(vsi);
- }
- }
-}
-
-/**
- * ice_ena_vsi - resume a VSI
- * @vsi: the VSI being resume
- */
-static int ice_ena_vsi(struct ice_vsi *vsi)
-{
- int err = 0;
-
- if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
- vsi->netdev) {
- if (netif_running(vsi->netdev)) {
- rtnl_lock();
- err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
- rtnl_unlock();
- } else {
err = ice_vsi_open(vsi);
}
}
@@ -3338,29 +3637,21 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
}
/**
- * ice_pf_dis_all_vsi - Pause all VSIs on a PF
- * @pf: the PF
- */
-static void ice_pf_dis_all_vsi(struct ice_pf *pf)
-{
- int v;
-
- ice_for_each_vsi(pf, v)
- if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v], false);
-}
-
-/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
+ * @locked: is the rtnl_lock already held
*/
-static int ice_pf_ena_all_vsi(struct ice_pf *pf)
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- if (ice_ena_vsi(pf->vsi[v]))
+ if (ice_ena_vsi(pf->vsi[v], locked))
return -EIO;
return 0;
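The #ifdef above makes ice_pf_ena_all_vsi() globally visible only when DCB is built in; the matching header declaration would presumably look like the following (an assumption, since the header change is not part of this hunk):

/* assumed companion declaration in the ice header (hypothetical) */
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */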
@@ -3375,16 +3666,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and reinit the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
int err;
if (!pf->vsi[i])
continue;
- /* VF VSI rebuild isn't supported yet */
- if (pf->vsi[i]->type == ICE_VSI_VF)
- continue;
-
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3412,7 +3699,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and replay the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3479,6 +3766,8 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
+ ice_dcb_rebuild(pf);
+
/* reset search_hint of irq_trackers to 0 since interrupts are
* reclaimed and could be allocated from beginning during VSI rebuild
*/
@@ -3512,7 +3801,7 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* restart the VSIs that were rebuilt and running before the reset */
- err = ice_pf_ena_all_vsi(pf);
+ err = ice_pf_ena_all_vsi(pf, false);
if (err) {
dev_err(&pf->pdev->dev, "error enabling VSIs\n");
/* no need to disable VSIs in tear down path in ice_rebuild()
@@ -3521,9 +3810,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
- ice_reset_all_vfs(pf, true);
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
@@ -3710,7 +3997,7 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
/**
* ice_bridge_getlink - Get the hardware bridge mode
* @skb: skb buff
- * @pid: process id
+ * @pid: process ID
* @seq: RTNL message seq
* @dev: the netdev being configured
* @filter_mask: filter mask passed in
@@ -3909,8 +4196,7 @@ static void ice_tx_timeout(struct net_device *netdev)
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
val = rd32(hw,
- GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
- tx_ring->vsi->hw_base_vector));
+ GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 413fdbbcc4d0..62571d33d0d6 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -5,7 +5,7 @@
/**
* ice_aq_read_nvm
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @module_typeid: module pointer location in words from the NVM beginning
* @offset: byte offset from the module beginning
* @length: length of the section to be read (in bytes from the offset)
@@ -235,7 +235,7 @@ ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
/**
* ice_init_nvm - initializes NVM setting
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
@@ -248,7 +248,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
u32 fla, gens_stat;
u8 sr_size;
- /* The SR size is stored regardless of the nvm programming mode
+ /* The SR size is stored regardless of the NVM programming mode
* as the blank mode may be used in the factory line.
*/
gens_stat = rd32(hw, GLNVM_GENS);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 56049739a250..8d49f83be7a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -43,9 +43,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
/**
* ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
* @start_node: pointer to the starting ice_sched_node struct in a sub-tree
- * @teid: node teid to search
+ * @teid: node TEID to search
*
- * This function searches for a node matching the teid in the scheduling tree
+ * This function searches for a node matching the TEID in the scheduling tree
* from the SW DB. The search is recursive and is restricted by the number of
* layers it has searched through; stopping at the max supported layer.
*
@@ -66,7 +66,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
return NULL;
- /* Check if teid matches to any of the children nodes */
+ /* Check if TEID matches to any of the children nodes */
for (i = 0; i < start_node->num_children; i++)
if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
return start_node->children[i];
@@ -86,7 +86,7 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
* @elems_req: number of elements to request
* @buf: pointer to buffer
@@ -118,7 +118,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
/**
* ice_aq_query_sched_elems - query scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to query
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-static enum ice_status
+enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -138,31 +138,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
- * ice_sched_query_elem - query element information from hw
- * @hw: pointer to the hw struct
- * @node_teid: node teid to be queried
- * @buf: buffer to element information
- *
- * This function queries HW element information
- */
-static enum ice_status
-ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
- struct ice_aqc_get_elem *buf)
-{
- u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
-
- buf_size = sizeof(*buf);
- memset(buf, 0, buf_size);
- buf->generic[0].node_teid = cpu_to_le32(node_teid);
- status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
- NULL);
- if (status || num_elem_ret != 1)
- ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
- return status;
-}
-
-/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
* @layer: Scheduler layer of the node
@@ -226,7 +201,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
/**
* ice_aq_delete_sched_elems - delete scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @grps_req: number of groups to delete
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -246,13 +221,13 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
}
/**
- * ice_sched_remove_elems - remove nodes from hw
- * @hw: pointer to the hw struct
+ * ice_sched_remove_elems - remove nodes from HW
+ * @hw: pointer to the HW struct
* @parent: pointer to the parent node
* @num_nodes: number of nodes
* @node_teids: array of node teids to be deleted
*
- * This function remove nodes from hw
+ * This function removes nodes from HW
*/
static enum ice_status
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
@@ -276,7 +251,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
- ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
@@ -284,7 +260,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/**
* ice_sched_get_first_node - get the first node of the given layer
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @parent: pointer the base node of the subtree
* @layer: layer number
*
@@ -360,12 +336,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
- status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
- if (status)
- ice_debug(hw, ICE_DBG_SCHED,
- "remove element failed %d\n", status);
+ ice_sched_remove_elems(hw, node->parent, 1, &teid);
}
parent = node->parent;
/* root has no parent */
@@ -409,7 +381,7 @@ err_exit:
/**
* ice_aq_get_dflt_topo - gets default scheduler topology
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @lport: logical port number
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -439,7 +411,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
/**
* ice_aq_add_sched_elems - adds scheduling element
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @grps_req: the number of groups that are requested to be added
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -460,7 +432,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -481,7 +453,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_resume_sched_elems - resume scheduler elements
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @elems_req: number of elements to resume
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
@@ -502,7 +474,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
/**
* ice_aq_query_sched_res - query scheduler resource
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @buf_size: buffer size in bytes
* @buf: pointer to buffer
* @cd: pointer to command details structure or NULL
@@ -521,13 +493,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
}
/**
- * ice_sched_suspend_resume_elems - suspend or resume hw nodes
- * @hw: pointer to the hw struct
+ * ice_sched_suspend_resume_elems - suspend or resume HW nodes
+ * @hw: pointer to the HW struct
* @num_nodes: number of nodes
* @node_teids: array of node teids to be suspended or resumed
* @suspend: true means suspend / false means resume
*
- * This function suspends or resumes hw nodes
+ * This function suspends or resumes HW nodes
*/
static enum ice_status
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
@@ -561,10 +533,54 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
}
/**
- * ice_sched_clear_agg - clears the agg related information
+ * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_q_ctx *q_ctx;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ /* allocate LAN queue contexts */
+ if (!vsi_ctx->lan_q_ctx[tc]) {
+ vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+ new_numqs,
+ sizeof(*q_ctx),
+ GFP_KERNEL);
+ if (!vsi_ctx->lan_q_ctx[tc])
+ return ICE_ERR_NO_MEMORY;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ return 0;
+ }
+ /* num queues are increased, update the queue contexts */
+ if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
+ u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
+
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
+ return ICE_ERR_NO_MEMORY;
+ memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
+ prev_num * sizeof(*q_ctx));
+ devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+ vsi_ctx->lan_q_ctx[tc] = q_ctx;
+ vsi_ctx->num_lan_q_entries[tc] = new_numqs;
+ }
+ return 0;
+}
+
+/**
+ * ice_sched_clear_agg - clears the aggregator related information
* @hw: pointer to the hardware structure
*
- * This function removes agg list and free up agg related memory
+ * This function removes the aggregator list and frees up aggregator related memory
* previously allocated.
*/
void ice_sched_clear_agg(struct ice_hw *hw)
@@ -622,7 +638,7 @@ void ice_sched_clear_port(struct ice_port_info *pi)
/**
* ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Cleanup scheduling elements from SW DB for all the ports
*/
@@ -646,16 +662,16 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
}
/**
- * ice_sched_add_elems - add nodes to hw and SW DB
+ * ice_sched_add_elems - add nodes to HW and SW DB
* @pi: port information structure
* @tc_node: pointer to the branch node
* @parent: pointer to the parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes
* @num_nodes_added: pointer to num nodes added
- * @first_node_teid: if new nodes are added then return the teid of first node
+ * @first_node_teid: if new nodes are added then return the TEID of first node
*
- * This function add nodes to hw as well as to SW DB for a given layer
+ * This function adds nodes to HW as well as to SW DB for a given layer
*/
static enum ice_status
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
@@ -697,7 +713,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
if (status || num_groups_added != 1) {
- ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG;
}
@@ -748,7 +765,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* @parent: pointer to parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes to be added
- * @first_node_teid: pointer to the first node teid
+ * @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
 * This function adds nodes to a given layer.
@@ -800,7 +817,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added += num_added;
}
- /* Don't modify the first node teid memory if the first node was
+ /* Don't modify the first node TEID memory if the first node was
* added already in the above call. Instead send some temp
* memory for all other recursive calls.
*/
@@ -832,7 +849,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
/**
* ice_sched_get_qgrp_layer - get the current queue group layer number
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function returns the current queue group layer number
*/
@@ -844,7 +861,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
/**
* ice_sched_get_vsi_layer - get the current VSI layer number
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* This function returns the current VSI layer number
*/
@@ -855,7 +872,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 7 4
* 5 or less sw_entry_point_layer
*/
- /* calculate the vsi layer based on number of layers. */
+ /* calculate the VSI layer based on number of layers. */
if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
@@ -973,7 +990,7 @@ enum ice_status ice_sched_init_port(struct ice_port_info *pi)
goto err_init_port;
}
- /* If the last node is a leaf node then the index of the Q group
+ /* If the last node is a leaf node then the index of the queue group
* layer is two less than the number of elements.
*/
if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
@@ -1082,7 +1099,7 @@ sched_query_out:
/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @base: pointer to the base node
* @node: pointer to the node to search
*
@@ -1114,13 +1131,13 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
}
/**
- * ice_sched_get_free_qparent - Get a free lan or rdma q group node
+ * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: branch number
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
*
- * This function retrieves a free lan or rdma q group node
+ * This function retrieves a free LAN or RDMA queue group node
*/
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
@@ -1138,11 +1155,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_ctx)
return NULL;
vsi_node = vsi_ctx->sched.vsi_node[tc];
- /* validate invalid VSI id */
+ /* bail out if there is no VSI node for this VSI ID */
if (!vsi_node)
goto lan_q_exit;
- /* get the first q group node from VSI sub-tree */
+ /* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
@@ -1158,12 +1175,12 @@ lan_q_exit:
}
/**
- * ice_sched_get_vsi_node - Get a VSI node based on VSI id
- * @hw: pointer to the hw struct
+ * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
+ * @hw: pointer to the HW struct
* @tc_node: pointer to the TC node
* @vsi_handle: software VSI handle
*
- * This function retrieves a VSI node for a given VSI id from a given
+ * This function retrieves a VSI node for a given VSI ID from a given
* TC branch
*/
static struct ice_sched_node *
@@ -1188,7 +1205,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @num_qs: number of queues
* @num_nodes: num nodes array
*
@@ -1204,7 +1221,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- /* calculate num nodes from q group to VSI layer */
+ /* calculate num nodes from queue group to VSI layer */
for (i = qgl; i > vsil; i--) {
/* round to the next integer if there is a remainder */
num = DIV_ROUND_UP(num, hw->max_children[i]);
@@ -1220,10 +1237,10 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* @vsi_handle: software VSI handle
* @tc_node: pointer to the TC node
* @num_nodes: pointer to the num nodes that needs to be added per layer
- * @owner: node owner (lan or rdma)
+ * @owner: node owner (LAN or RDMA)
*
 * This function adds the VSI child nodes to the tree. It gets called for
- * lan and rdma separately.
+ * LAN and RDMA separately.
*/
static enum ice_status
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
@@ -1271,44 +1288,8 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
- * @pi: port information structure
- * @vsi_node: pointer to the VSI node
- * @num_nodes: pointer to the num nodes that needs to be removed per layer
- * @owner: node owner (lan or rdma)
- *
- * This function removes the VSI child nodes from the tree. It gets called for
- * lan and rdma separately.
- */
-static void
-ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
- struct ice_sched_node *vsi_node, u16 *num_nodes,
- u8 owner)
-{
- struct ice_sched_node *node, *next;
- u8 i, qgl, vsil;
- u16 num;
-
- qgl = ice_sched_get_qgrp_layer(pi->hw);
- vsil = ice_sched_get_vsi_layer(pi->hw);
-
- for (i = qgl; i > vsil; i--) {
- num = num_nodes[i];
- node = ice_sched_get_first_node(pi->hw, vsi_node, i);
- while (node && num) {
- next = node->sibling;
- if (node->owner == owner && !node->num_children) {
- ice_free_sched_node(pi, node);
- num--;
- }
- node = next;
- }
- }
-}
-
-/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1427,7 +1408,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
/* calculate number of supported nodes needed for this VSI */
ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
- /* add vsi supported nodes to tc subtree */
+ /* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
num_nodes);
}
@@ -1446,7 +1427,6 @@ static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
- u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
@@ -1454,7 +1434,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
- u8 i;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -1468,41 +1447,30 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;
- if (owner == ICE_SCHED_NODE_OWNER_LAN)
- prev_numqs = vsi_ctx->sched.max_lanq[tc];
- else
- return ICE_ERR_PARAM;
-
- /* num queues are not changed */
- if (prev_numqs == new_numqs)
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ /* num queues are not changed or less than the previous number */
+ if (new_numqs <= prev_numqs)
+ return status;
+ status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
return status;
-
- /* calculate number of nodes based on prev/new number of qs */
- if (prev_numqs)
- ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-
- if (prev_numqs > new_numqs) {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
-
- ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
- owner);
- } else {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] -= prev_num_nodes[i];
-
- status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
- new_num_nodes, owner);
- if (status)
- return status;
- }
-
+ /* Always keep the maximum queue configuration. Update the tree only
+ * if the number of queues is greater than the previous number. This
+ * may leave some extra nodes in the tree if the number of queues is
+ * less than before, but that wouldn't harm anything. Removing those
+ * extra nodes may complicate the code if they are part of an SRL or
+ * are individually rate limited.
+ */
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes, owner);
+ if (status)
+ return status;
vsi_ctx->sched.max_lanq[tc] = new_numqs;
- return status;
+ return 0;
}
/**
@@ -1511,7 +1479,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* @vsi_handle: software VSI handle
* @tc: TC number
* @maxqs: max number of queues
- * @owner: lan or rdma
+ * @owner: LAN or RDMA
* @enable: TC enabled or disabled
*
* This function adds/updates VSI nodes based on the number of queues. If TC is
@@ -1527,6 +1495,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
@@ -1535,7 +1504,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return ICE_ERR_PARAM;
vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
- /* suspend the VSI if tc is not enabled */
+ /* suspend the VSI if TC is not enabled */
if (!enable) {
if (vsi_node && vsi_node->in_use) {
u32 teid = le32_to_cpu(vsi_node->info.node_teid);
@@ -1586,7 +1555,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
}
/**
- * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry
+ * ice_sched_rm_agg_vsi_entry - remove aggregator related VSI info entry
* @pi: port information structure
* @vsi_handle: software VSI handle
*
@@ -1646,8 +1615,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
- u8 i, j = 0;
+ u8 i;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
@@ -1655,8 +1625,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_ctx)
goto exit_sched_rm_vsi_cfg;
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
+ u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -1689,7 +1660,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_free_sched_node(pi, vsi_node);
vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up agg related vsi info if any */
+ /* clean up aggregator related VSI info if any */
ice_sched_rm_agg_vsi_info(pi, vsi_handle);
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index bee8221ad146..3902a8ad3025 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -24,6 +24,10 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
+enum ice_status
+ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
+ struct ice_aqc_get_elem *buf, u16 buf_size,
+ u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 683f48824a29..17afe6acb18a 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -12,6 +12,7 @@ enum ice_status {
ICE_ERR_PARAM = -1,
ICE_ERR_NOT_IMPL = -2,
ICE_ERR_NOT_READY = -3,
+ ICE_ERR_NOT_SUPPORTED = -4,
ICE_ERR_BAD_PTR = -5,
ICE_ERR_INVAL_SIZE = -6,
ICE_ERR_DEVICE_NOT_SUPPORTED = -8,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 09d1c314b68f..9f1f595ae7e6 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -19,7 +19,7 @@
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
* In case of VLAN filter first two bytes defines ether type (0x8100)
- * and remaining two bytes are placeholder for programming a given VLAN id
+ * and remaining two bytes are placeholder for programming a given VLAN ID
* In case of Ether type filter it is treated as header without VLAN tag
* and byte 12 and 13 is used to program a given Ether type instead
*/
@@ -51,7 +51,7 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
/**
* ice_aq_alloc_free_res - command to allocate/free resources
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
@@ -87,7 +87,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
@@ -163,7 +163,7 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
/**
* ice_aq_add_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@@ -206,7 +206,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_free_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
* @cd: pointer to command details structure or NULL
@@ -242,7 +242,7 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_update_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
*
@@ -279,7 +279,7 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_is_vsi_valid - check whether the VSI is valid or not
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* check whether the VSI is valid or not
@@ -290,11 +290,11 @@ bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
}
/**
- * ice_get_hw_vsi_num - return the hw VSI number
- * @hw: pointer to the hw struct
+ * ice_get_hw_vsi_num - return the HW VSI number
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
- * return the hw VSI number
+ * return the HW VSI number
* Caution: call this function only if VSI is valid (ice_is_vsi_valid)
*/
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
@@ -304,7 +304,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* return the VSI context entry for a given VSI handle
@@ -316,21 +316,42 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_save_vsi_ctx - save the VSI context for a given VSI handle
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
* @vsi: VSI context pointer
*
* save the VSI context entry for a given VSI handle
*/
-static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
- struct ice_vsi_ctx *vsi)
+static void
+ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
hw->vsi_ctx[vsi_handle] = vsi;
}
/**
+ * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ */
+static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_vsi_ctx *vsi;
+ u8 i;
+
+ vsi = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi)
+ return;
+ ice_for_each_traffic_class(i) {
+ if (vsi->lan_q_ctx[i]) {
+ devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+ vsi->lan_q_ctx[i] = NULL;
+ }
+ }
+}
+
+/**
* ice_clear_vsi_ctx - clear the VSI context entry
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*
* clear the VSI context entry
@@ -341,6 +362,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
vsi = ice_get_vsi_ctx(hw, vsi_handle);
if (vsi) {
+ ice_clear_vsi_q_ctx(hw, vsi_handle);
devm_kfree(ice_hw_to_dev(hw), vsi);
hw->vsi_ctx[vsi_handle] = NULL;
}
@@ -348,7 +370,7 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_clear_all_vsi_ctx - clear all the VSI context entries
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*/
void ice_clear_all_vsi_ctx(struct ice_hw *hw)
{
@@ -360,7 +382,7 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
/**
* ice_add_vsi - add VSI context to the hardware and VSI handle list
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle provided by drivers
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@@ -383,7 +405,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
return status;
tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!tmp_vsi_ctx) {
- /* Create a new vsi context */
+ /* Create a new VSI context */
tmp_vsi_ctx = devm_kzalloc(ice_hw_to_dev(hw),
sizeof(*tmp_vsi_ctx), GFP_KERNEL);
if (!tmp_vsi_ctx) {
@@ -398,12 +420,12 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
- return status;
+ return 0;
}
/**
* ice_free_vsi- free VSI context from hardware and VSI handle list
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
@@ -428,7 +450,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_update_vsi
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle: unique VSI handle
* @vsi_ctx: pointer to a VSI context struct
* @cd: pointer to command details structure or NULL
@@ -447,8 +469,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
/**
* ice_aq_alloc_free_vsi_list
- * @hw: pointer to the hw struct
- * @vsi_list_id: VSI list id returned or used for lookup
+ * @hw: pointer to the HW struct
+ * @vsi_list_id: VSI list ID returned or used for lookup
* @lkup_type: switch rule filter lookup type
* @opc: switch rules population command type - pass in the command opcode
*
@@ -504,7 +526,7 @@ ice_aq_alloc_free_vsi_list_exit:
/**
* ice_aq_sw_rules - add/update/remove switch rules
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @rule_list: pointer to switch rule population list
* @rule_list_sz: total size of the rule list in bytes
* @num_rules: number of switch rules in the rule_list
@@ -643,21 +665,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
- fi->lb_en = true;
- /* Do not set lan_en to TRUE if
+ /* Setting LB for prune actions will result in replicated
+ * packets to the internal switch that will be dropped.
+ */
+ if (fi->lkup_type != ICE_SW_LKUP_VLAN)
+ fi->lb_en = true;
+
+ /* Set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
- * 2.1 The lookup is MAC with unicast addr for MAC, OR
- * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ * 2.1 The lookup is a directional lookup like ethertype,
+ * promiscuous, ethertype-MAC, promiscuous-VLAN
+ * and default-port OR
+ * 2.2 The lookup is VLAN, OR
+ * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
+ * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
+ *
+ * OR
+ *
+ * The switch is a VEPA.
*
- * In all other cases, the LAN enable has to be set to true.
+ * In all other cases, the LAN enable has to be set to false.
*/
- if (!(hw->evb_veb &&
- ((fi->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
- (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
- is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ if (hw->evb_veb) {
+ if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC ||
+ fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ fi->lkup_type == ICE_SW_LKUP_DFLT ||
+ fi->lkup_type == ICE_SW_LKUP_VLAN ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
+ fi->lan_en = true;
+ } else {
fi->lan_en = true;
+ }
}
}
@@ -799,7 +843,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* @hw: pointer to the hardware structure
* @m_ent: the management entry for which sw marker needs to be added
* @sw_marker: sw marker to tag the Rx descriptor with
- * @l_id: large action resource id
+ * @l_id: large action resource ID
*
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
@@ -811,8 +855,8 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
/* For software marker we need 3 large actions
* 1. FWD action: FWD TO VSI or VSI LIST
- * 2. GENERIC VALUE action to hold the profile id
- * 3. GENERIC VALUE action to hold the software marker id
+ * 2. GENERIC VALUE action to hold the profile ID
+ * 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
enum ice_status status;
@@ -875,13 +919,13 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
- /* Update the action to point to the large action id */
+ /* Update the action to point to the large action ID */
rx_tx->pdata.lkup_tx_rx.act =
cpu_to_le32(ICE_SINGLE_ACT_PTR |
((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
ICE_SINGLE_ACT_PTR_VAL_M));
- /* Use the filter rule id of the previously created rule with single
+ /* Use the filter rule ID of the previously created rule with single
* act. Once the update happens, hardware will treat this as large
* action
*/
@@ -904,10 +948,10 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to set in the VSI mapping
* @num_vsi: number of VSI handles in the array
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
*
- * Helper function to create a new entry of VSI list id to VSI mapping
- * using the given VSI list id
+ * Helper function to create a new entry of VSI list ID to VSI mapping
+ * using the given VSI list ID
*/
static struct ice_vsi_list_map_info *
ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -935,13 +979,13 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* @hw: pointer to the hardware structure
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
* @remove: Boolean value to indicate if this is a remove action
* @opc: switch rules population command type - pass in the command opcode
* @lkup_type: lookup type of the filter
*
* Call AQ command to add a new switch rule or update existing switch rule
- * using the given VSI list id
+ * using the given VSI list ID
*/
static enum ice_status
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
@@ -998,7 +1042,7 @@ exit:
/**
* ice_create_vsi_list_rule - Creates and populates a VSI list rule
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
* @vsi_handle_arr: array of VSI handles to form a VSI list
* @num_vsi: number of VSI handles in the array
* @vsi_list_id: stores the ID of the VSI list to be created
@@ -1092,7 +1136,7 @@ ice_create_pkt_fwd_rule_exit:
* @f_info: filter information for switch rule
*
* Call AQ command to update a previously created switch rule with a
- * VSI list id
+ * VSI list ID
*/
static enum ice_status
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
@@ -1119,7 +1163,7 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
/**
* ice_update_sw_rule_bridge_mode
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
@@ -1174,7 +1218,7 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
* Update the previously created switch rule with the
- * newly created VSI list id
+ * newly created VSI list ID
* if a VSI list was previously created
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
@@ -1255,7 +1299,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
return 0;
/* Update the previously created VSI list set with
- * the new VSI id passed in
+ * the new VSI ID passed in
*/
vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
opcode = ice_aqc_opc_update_sw_rules;
@@ -1263,7 +1307,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
vsi_list_id, false, opcode,
new_fltr->lkup_type);
- /* update VSI list mapping info with new VSI id */
+ /* update VSI list mapping info with new VSI ID */
if (!status)
set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
}
@@ -1305,7 +1349,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* @hw: pointer to the hardware structure
* @recp_id: lookup type for which VSI lists needs to be searched
* @vsi_handle: VSI handle to be found in VSI list
- * @vsi_list_id: VSI list id found containing vsi_handle
+ * @vsi_list_id: VSI list ID found containing vsi_handle
*
* Helper function to search a VSI list with single entry containing given VSI
* handle element. This can be extended further to search VSI list with more
@@ -1336,7 +1380,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
/**
* ice_add_rule_internal - add rule for a given lookup type
* @hw: pointer to the hardware structure
- * @recp_id: lookup type (recipe id) for which rule has to be added
+ * @recp_id: lookup type (recipe ID) for which rule has to be added
* @f_entry: structure containing MAC forwarding information
*
* Adds or updates the rule lists for a given recipe
@@ -1381,7 +1425,7 @@ ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
/**
* ice_remove_vsi_list_rule
* @hw: pointer to the hardware structure
- * @vsi_list_id: VSI list id generated as part of allocate resource
+ * @vsi_list_id: VSI list ID generated as part of allocate resource
* @lkup_type: switch rule filter lookup type
*
* The VSI list should be emptied before this function is called to remove the
@@ -1506,7 +1550,7 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
/**
* ice_remove_rule_internal - Remove a filter rule of a given type
* @hw: pointer to the hardware structure
- * @recp_id: recipe id for which the rule needs to removed
+ * @recp_id: recipe ID for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
static enum ice_status
@@ -1556,7 +1600,7 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
if (status)
goto exit;
- /* if vsi count goes to zero after updating the vsi list */
+ /* if VSI count goes to zero after updating the VSI list */
if (list_elem->vsi_count == 0)
remove_rule = true;
}
@@ -1634,7 +1678,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
return ICE_ERR_PARAM;
hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
return ICE_ERR_PARAM;
m_list_itr->fltr_info.src = hw_vsi_id;
@@ -1710,7 +1754,7 @@ ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
((u8 *)r_iter + (elem_sent * s_rule_size));
}
- /* Fill up rule id based on the value returned from FW */
+ /* Fill up rule ID based on the value returned from FW */
r_iter = s_rule;
list_for_each_entry(m_list_itr, m_list, list_entry) {
struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
@@ -1770,7 +1814,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
new_fltr = &f_entry->fltr_info;
- /* VLAN id should only be 12 bits */
+ /* VLAN ID should only be 12 bits */
if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
return ICE_ERR_PARAM;
@@ -1828,7 +1872,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
}
}
} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
- /* Update existing VSI list to add new VSI id only if it used
+ /* Update existing VSI list to add new VSI ID only if it used
* by one VLAN rule.
*/
cur_fltr = &v_list_itr->fltr_info;
@@ -1838,7 +1882,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
/* If VLAN rule exists and VSI list being used by this rule is
* referenced by more than 1 VLAN rule. Then create a new VSI
* list appending previous VSI with new VSI and update existing
- * VLAN rule to point to new VSI list id
+ * VLAN rule to point to new VSI list ID
*/
struct ice_fltr_info tmp_fltr;
u16 vsi_handle_arr[2];
@@ -1926,6 +1970,65 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
}
/**
+ * ice_add_eth_mac - Add ethertype and MAC based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype and MAC filters, MAC is optional
+ */
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+ struct ice_fltr_list_entry *em_list_itr;
+
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry(em_list_itr, em_list, list_entry) {
+ enum ice_sw_lkup_type l_type =
+ em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->fltr_info.flag = ICE_FLTR_TX;
+ em_list_itr->status = ice_add_rule_internal(hw, l_type,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return 0;
+}
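A minimal usage sketch for the two new ethertype helpers, assuming a caller that already owns a valid hw pointer; only the lookup type checked by the functions above is filled in, while a real caller would also populate fltr_info.l_data and the forward action.

	struct ice_fltr_list_entry entry = { 0 };
	LIST_HEAD(em_list);

	/* hypothetical single-entry list; lkup_type is what the helpers check */
	entry.fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list_add(&entry.list_entry, &em_list);

	if (!ice_add_eth_mac(hw, &em_list))
		ice_remove_eth_mac(hw, &em_list);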
+
+/**
+ * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
+ * @hw: pointer to the hardware structure
+ * @em_list: list of ethertype or ethertype MAC entries
+ */
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list)
+{
+ struct ice_fltr_list_entry *em_list_itr, *tmp;
+
+ if (!em_list || !hw)
+ return ICE_ERR_PARAM;
+
+ list_for_each_entry_safe(em_list_itr, tmp, em_list, list_entry) {
+ enum ice_sw_lkup_type l_type =
+ em_list_itr->fltr_info.lkup_type;
+
+ if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
+ l_type != ICE_SW_LKUP_ETHERTYPE)
+ return ICE_ERR_PARAM;
+
+ em_list_itr->status = ice_remove_rule_internal(hw, l_type,
+ em_list_itr);
+ if (em_list_itr->status)
+ return em_list_itr->status;
+ }
+ return 0;
+}
+
+/**
* ice_rem_sw_rule_info
* @hw: pointer to the hardware structure
* @rule_head: pointer to the switch list structure that we want to delete
@@ -2170,7 +2273,7 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_entry;
enum ice_status status = 0;
- /* check to make sure VSI id is valid and within boundary */
+ /* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -2190,6 +2293,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
}
/**
+ * ice_determine_promisc_mask
+ * @fi: filter info to parse
+ *
+ * Helper function to determine which ICE_PROMISC_ mask corresponds
+ * to given filter into.
+ */
+static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
+{
+ u16 vid = fi->l_data.mac_vlan.vlan_id;
+ u8 *macaddr = fi->l_data.mac.mac_addr;
+ bool is_tx_fltr = false;
+ u8 promisc_mask = 0;
+
+ if (fi->flag == ICE_FLTR_TX)
+ is_tx_fltr = true;
+
+ if (is_broadcast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
+ else if (is_multicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
+ else if (is_unicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
+ if (vid)
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
+
+ return promisc_mask;
+}
+
+/**
+ * ice_remove_promisc - Remove promisc based filter rules
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe ID for which the rule needs to be removed
+ * @v_list: list of promisc entries
+ */
+static enum ice_status
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
+ struct list_head *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
+
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
+ v_list_itr->status =
+ ice_remove_rule_internal(hw, recp_id, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return 0;
+}
+
+/**
+ * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *fm_entry, *tmp;
+ struct list_head remove_list_head;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (vid)
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ else
+ recipe_id = ICE_SW_LKUP_PROMISC;
+
+ rule_head = &sw->recp_list[recipe_id].filt_rules;
+ rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
+
+ INIT_LIST_HEAD(&remove_list_head);
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(itr, rule_head, list_entry) {
+ u8 fltr_promisc_mask = 0;
+
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+
+ fltr_promisc_mask |=
+ ice_determine_promisc_mask(&itr->fltr_info);
+
+ /* Skip if filter is not completely specified by given mask */
+ if (fltr_promisc_mask & ~promisc_mask)
+ continue;
+
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ &remove_list_head,
+ &itr->fltr_info);
+ if (status) {
+ mutex_unlock(rule_lock);
+ goto free_fltr_list;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
+
+free_fltr_list:
+ list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
+ list_del(&fm_entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fm_entry);
+ }
+
+ return status;
+}
+
+/**
+ * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
+{
+ enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_fltr_info new_fltr;
+ enum ice_status status = 0;
+ bool is_tx_fltr;
+ u16 hw_vsi_id;
+ int pkt_type;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ memset(&new_fltr, 0, sizeof(new_fltr));
+
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
+ new_fltr.l_data.mac_vlan.vlan_id = vid;
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ } else {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
+ recipe_id = ICE_SW_LKUP_PROMISC;
+ }
+
+ /* Separate filters must be set for each direction/packet type
+ * combination, so we will loop over the mask value, store the
+ * individual type, and clear it out in the input mask as it
+ * is found.
+ */
+ while (promisc_mask) {
+ u8 *mac_addr;
+
+ pkt_type = 0;
+ is_tx_fltr = false;
+
+ if (promisc_mask & ICE_PROMISC_UCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_RX;
+ pkt_type = UCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_TX;
+ pkt_type = UCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_RX;
+ pkt_type = MCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_TX;
+ pkt_type = MCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_RX;
+ pkt_type = BCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_TX;
+ pkt_type = BCAST_FLTR;
+ is_tx_fltr = true;
+ }
+
+ /* Check for VLAN promiscuous flag */
+ if (promisc_mask & ICE_PROMISC_VLAN_RX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_RX;
+ } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_TX;
+ is_tx_fltr = true;
+ }
+
+ /* Set filter DA based on packet type */
+ mac_addr = new_fltr.l_data.mac.mac_addr;
+ if (pkt_type == BCAST_FLTR) {
+ eth_broadcast_addr(mac_addr);
+ } else if (pkt_type == MCAST_FLTR ||
+ pkt_type == UCAST_FLTR) {
+ /* Use the dummy ether header DA */
+ ether_addr_copy(mac_addr, dummy_eth_header);
+ if (pkt_type == MCAST_FLTR)
+ mac_addr[0] |= 0x1; /* Set multicast bit */
+ }
+
+ /* Need to reset this to zero for all iterations */
+ new_fltr.flag = 0;
+ if (is_tx_fltr) {
+ new_fltr.flag |= ICE_FLTR_TX;
+ new_fltr.src = hw_vsi_id;
+ } else {
+ new_fltr.flag |= ICE_FLTR_RX;
+ new_fltr.src = hw->port_info->lport;
+ }
+
+ new_fltr.fltr_act = ICE_FWD_TO_VSI;
+ new_fltr.vsi_handle = vsi_handle;
+ new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_list_entry.fltr_info = new_fltr;
+
+ status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
+ if (status)
+ goto set_promisc_exit;
+ }
+
+set_promisc_exit:
+ return status;
+}
+
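The loop above peels exactly one direction/packet-type bit per pass and folds an outstanding VLAN bit into the same pass, so every iteration installs a single rule. A standalone sketch of that peeling, using a subset of the enum ice_promisc_flags values and omitting the actual rule installation:

#include <stdint.h>
#include <stdio.h>

/* subset of the promisc bits, values as in enum ice_promisc_flags */
#define P_UCAST_RX 0x01
#define P_UCAST_TX 0x02
#define P_MCAST_RX 0x04
#define P_VLAN_RX  0x40

int main(void)
{
	uint8_t mask = P_UCAST_RX | P_MCAST_RX | P_VLAN_RX;
	int rules = 0;

	/* one rule per direction/type bit, VLAN folded into the same pass */
	while (mask) {
		if (mask & P_UCAST_RX)
			mask &= ~P_UCAST_RX;
		else if (mask & P_UCAST_TX)
			mask &= ~P_UCAST_TX;
		else if (mask & P_MCAST_RX)
			mask &= ~P_MCAST_RX;
		if (mask & P_VLAN_RX)
			mask &= ~P_VLAN_RX;
		rules++;
	}
	printf("rules installed: %d\n", rules);	/* prints 2 */
	return 0;
}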
+/**
+ * ice_set_vlan_vsi_promisc
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @rm_vlan_promisc: true to clear (rather than set) the given promiscuous mode(s)
+ *
+ * Configure VSI with all associated VLANs to given promiscuous mode(s)
+ */
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *list_itr, *tmp;
+ struct list_head vsi_list_head;
+ struct list_head *vlan_head;
+ struct mutex *vlan_lock; /* Lock to protect filter rule list */
+ enum ice_status status;
+ u16 vlan_id;
+
+ INIT_LIST_HEAD(&vsi_list_head);
+ vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ mutex_lock(vlan_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
+ &vsi_list_head);
+ mutex_unlock(vlan_lock);
+ if (status)
+ goto free_fltr_list;
+
+ list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
+ if (rm_vlan_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ if (status)
+ break;
+ }
+
+free_fltr_list:
+ list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
+ list_del(&list_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_itr);
+ }
+ return status;
+}
+
+/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -2224,12 +2612,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
case ICE_SW_LKUP_VLAN:
ice_remove_vlan(hw, &remove_list_head);
break;
+ case ICE_SW_LKUP_PROMISC:
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ ice_remove_promisc(hw, lkup, &remove_list_head);
+ break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
- case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_DFLT:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_LAST:
default:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
@@ -2263,7 +2653,7 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
- * @recp_id: Recipe id for which rules need to be replayed
+ * @recp_id: Recipe ID for which rules need to be replayed
* @list_head: list for which filters need to be replayed
*
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
@@ -2287,7 +2677,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
f_entry.fltr_info = itr->fltr_info;
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
itr->fltr_info.vsi_handle == vsi_handle) {
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
status = ice_add_rule_internal(hw, recp_id, &f_entry);
@@ -2302,7 +2692,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
- /* update the src in case it is vsi num */
+ /* update the src in case it is VSI num */
if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
f_entry.fltr_info.src = hw_vsi_id;
if (recp_id == ICE_SW_LKUP_VLAN)
@@ -2342,7 +2732,7 @@ enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
/**
* ice_rm_all_sw_replay_rule_info - deletes filter replay rules
- * @hw: pointer to the hw struct
+ * @hw: pointer to the HW struct
*
* Deletes the filter replay rules.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d5ef0bd58bf9..732b0b9b2e15 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -9,6 +9,13 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
#define ICE_VSI_INVAL_ID 0xffff
+#define ICE_INVAL_Q_HANDLE 0xFFFF
+
+/* VSI queue context structure */
+struct ice_q_ctx {
+ u16 q_handle;
+};
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
@@ -20,6 +27,8 @@ struct ice_vsi_ctx {
struct ice_sched_vsi_info sched;
u8 alloc_from_pool;
u8 vf_num;
+ u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_sw_fwd_act_type {
@@ -44,7 +53,7 @@ enum ice_sw_lkup_type {
ICE_SW_LKUP_LAST
};
-/* type of filter src id */
+/* type of filter src ID */
enum ice_src_id {
ICE_SRC_ID_UNKNOWN = 0,
ICE_SRC_ID_VSI,
@@ -95,8 +104,8 @@ struct ice_fltr_info {
/* Depending on filter action */
union {
- /* queue id in case of ICE_FWD_TO_Q and starting
- * queue id in case of ICE_FWD_TO_QGRP.
+ /* queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
*/
u16 q_id:11;
u16 hw_vsi_id:10;
@@ -143,7 +152,7 @@ struct ice_sw_recipe {
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
};
-/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
+/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
struct ice_vsi_list_map_info {
struct list_head list_entry;
DECLARE_BITMAP(vsi_map, ICE_MAX_VSI);
@@ -165,7 +174,7 @@ struct ice_fltr_list_entry {
* used for VLAN membership.
*/
struct ice_fltr_mgmt_list_entry {
- /* back pointer to VSI list id to VSI list mapping */
+ /* back pointer to VSI list ID to VSI list mapping */
struct ice_vsi_list_map_info *vsi_list_info;
u16 vsi_count;
#define ICE_INVAL_LG_ACT_INDEX 0xffff
@@ -178,6 +187,17 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+enum ice_promisc_flags {
+ ICE_PROMISC_UCAST_RX = 0x1,
+ ICE_PROMISC_UCAST_TX = 0x2,
+ ICE_PROMISC_MCAST_RX = 0x4,
+ ICE_PROMISC_MCAST_TX = 0x8,
+ ICE_PROMISC_BCAST_RX = 0x10,
+ ICE_PROMISC_BCAST_TX = 0x20,
+ ICE_PROMISC_VLAN_RX = 0x40,
+ ICE_PROMISC_VLAN_TX = 0x80,
+};
+
/* VSI related commands */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
@@ -198,11 +218,27 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
+enum ice_status
+ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+enum ice_status
+ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+
+/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc);
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 1af21bbe180e..2364eaf33d23 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include "ice.h"
+#include "ice_dcb_lib.h"
#define ICE_RX_HDR_SIZE 256
@@ -100,8 +101,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
- int napi_budget)
+static bool
+ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = vsi->work_lmt;
@@ -236,9 +237,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
+ /* round up to nearest page */
tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- 4096);
+ PAGE_SIZE);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
@@ -282,8 +283,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_buf->page)
continue;
- dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_buf->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(dev, rx_buf->dma,
+ rx_buf->page_offset,
+ ICE_RXBUF_2048, DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
rx_buf->page = NULL;
rx_buf->page_offset = 0;
@@ -339,9 +349,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
+ /* round up to nearest page */
+ rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
@@ -389,8 +399,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
* Returns true if the page was successfully allocated or
* reused.
*/
-static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *bi)
+static bool
+ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -409,7 +419,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -423,6 +434,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -444,7 +457,7 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!rx_ring->netdev || !cleaned_count)
return false;
- /* get the RX descriptor and buffer based on next_to_use */
+ /* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
bi = &rx_ring->rx_buf[ntu];
@@ -452,6 +465,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!ice_alloc_mapped_page(rx_ring, bi))
goto no_bufs;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ ICE_RXBUF_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -497,61 +516,43 @@ static bool ice_page_is_reserved(struct page *page)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_buf: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buf to place the data into
- *
- * This function will add the data contained in rx_buf->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
*
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * Update the offset within page so that Rx buf will be ready to be reused.
+ * For systems with PAGE_SIZE < 8192 this function will flip the page offset
+ * so the second half of page assigned to Rx buffer will be used, otherwise
+ * the offset is moved by the @size bytes
*/
-static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ICE_RXBUF_2048;
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= size;
#else
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
- unsigned int truesize;
-#endif /* PAGE_SIZE < 8192) */
-
- struct page *page;
- unsigned int size;
-
- size = le16_to_cpu(rx_desc->wb.pkt_len) &
- ICE_RX_FLX_DESC_PKT_LEN_M;
-
- page = rx_buf->page;
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += size;
+#endif
+}
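A small standalone illustration of the flip on 4K-page systems, where each page is split into two ICE_RXBUF_2048 halves and the XOR simply ping-pongs between them:

#include <stdio.h>

#define BUF_SIZE 2048	/* mirrors ICE_RXBUF_2048 */

int main(void)
{
	unsigned int page_offset = 0;

	/* XOR with the buffer size alternates between the two halves */
	page_offset ^= BUF_SIZE;	/* 0    -> 2048 */
	printf("offset = %u\n", page_offset);
	page_offset ^= BUF_SIZE;	/* 2048 -> 0 */
	printf("offset = %u\n", page_offset);
	return 0;
}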
+/**
+ * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
+ * @rx_buf: buffer containing the page
+ *
+ * If page is reusable, we have a green light for calling ice_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed
+ */
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+{
#if (PAGE_SIZE >= 8192)
- truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif /* PAGE_SIZE >= 8192) */
-
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buf->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ice_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buf->page_offset, size, truesize);
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
+ unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
+ struct page *page = rx_buf->page;
/* avoid re-using remote pages */
if (unlikely(ice_page_is_reserved(page)))
@@ -559,36 +560,61 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += truesize;
-
if (rx_buf->page_offset > last_offset)
return false;
#endif /* PAGE_SIZE < 8192) */
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- get_page(rx_buf->page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buf->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
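The pagecnt_bias scheme above trades a get_page() per received buffer for a single large page_ref_add() at allocation time, so only the local bias counter is touched in the hot path. A toy userspace model of that accounting (USHRT_MAX as in the patch; 'reusable' mirrors the page_count() - pagecnt_bias > 1 test, and the stack-side put_page() is simulated by decrementing refcount):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int refcount = 1 + (USHRT_MAX - 1);	/* page_count() after page_ref_add() */
	unsigned int bias = USHRT_MAX;			/* rx_buf->pagecnt_bias */

	bias--;		/* first half handed to an skb (ice_get_rx_buf) */
	printf("outstanding=%u reusable=%d\n", refcount - bias, refcount - bias <= 1);

	bias--;		/* second half handed to another skb */
	printf("outstanding=%u reusable=%d\n", refcount - bias, refcount - bias <= 1);

	refcount--;	/* stack freed one of the frags */
	printf("outstanding=%u reusable=%d\n", refcount - bias, refcount - bias <= 1);
	return 0;
}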
/**
+ * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_buf: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ * The function will then update the page offset.
+ */
+static void
+ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->page_offset, size, truesize);
+
+ /* page is being used so we must update the page offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+}
+
+/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
*/
-static void ice_reuse_rx_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *old_buf)
+static void
+ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -599,121 +625,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buf = *old_buf;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and unnecessary copy of skb.
+ */
+ new_buf->dma = old_buf->dma;
+ new_buf->page = old_buf->page;
+ new_buf->page_offset = old_buf->page_offset;
+ new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
/**
- * ice_fetch_rx_buf - Allocate skb and populate it
+ * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @skb: skb to be used
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
*/
-static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
+static struct ice_rx_buf *
+ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
+ const unsigned int size)
{
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
- struct page *page;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- page = rx_buf->page;
- prefetchw(page);
+ prefetchw(rx_buf->page);
+ *skb = rx_buf->skb;
- skb = rx_buf->skb;
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
+ rx_buf->page_offset, size,
+ DMA_FROM_DEVICE);
- if (likely(!skb)) {
- u8 *page_addr = page_address(page) + rx_buf->page_offset;
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buf->pagecnt_bias--;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ return rx_buf;
+}
+
+/**
+ * ice_construct_skb - Allocate skb and populate it
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *
+ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ unsigned int size)
+{
+ void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch((void *)(page_addr + L1_CACHE_BYTES));
+ prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buf_failed++;
- return NULL;
- }
-
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- skb_record_rx_queue(skb, rx_ring->q_index);
- } else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset,
- ICE_RXBUF_2048,
- DMA_FROM_DEVICE);
+ skb_record_rx_queue(skb, rx_ring->q_index);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > ICE_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);
- rx_buf->skb = NULL;
- }
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
- /* pull page into skb */
- if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
+ /* if we exhaust the linear part then add what is left as a frag */
+ size -= headlen;
+ if (size) {
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+ skb_add_rx_frag(skb, 0, rx_buf->page,
+ rx_buf->page_offset + headlen, size, truesize);
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ /* buffer is unused, reset bias back to rx_buf; data was copied
+ * onto skb's linear part so there's no need for adjusting
+ * page offset and we can reuse this buffer as-is
+ */
+ rx_buf->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
-
return skb;
}
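ice_construct_skb() therefore copies at most ICE_RX_HDR_SIZE bytes of headers into the skb linear area and attaches whatever remains as a page fragment. A simplified standalone sketch of that split, which caps the copy at 256 bytes rather than calling eth_get_headlen() to find the real header boundary:

#include <stdio.h>

#define RX_HDR_SIZE 256		/* mirrors ICE_RX_HDR_SIZE */
#define ALIGN_LONG(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

int main(void)
{
	unsigned int size = 1500;	/* frame length from the Rx descriptor */
	unsigned int headlen = size > RX_HDR_SIZE ? RX_HDR_SIZE : size;

	/* linear copy is padded to a long boundary, the rest becomes a frag */
	printf("linear copy: %zu bytes, frag: %u bytes\n",
	       ALIGN_LONG(headlen), size - headlen);
	return 0;
}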
/**
- * ice_pull_tail - ice specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
*
- * This function is an ice specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
*/
-static void ice_pull_tail(struct sk_buff *skb)
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int pull_len;
- unsigned char *va;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+ /* hand second half of page back to the ring */
+ if (ice_can_reuse_rx_page(rx_buf)) {
+ ice_reuse_rx_page(rx_ring, rx_buf);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ }
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
+ /* clear contents of buffer_info */
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
}
/**
@@ -730,10 +767,6 @@ static void ice_pull_tail(struct sk_buff *skb)
*/
static bool ice_cleanup_headers(struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ice_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -751,8 +784,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
-static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
- const u16 stat_err_bits)
+static bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
return !!(rx_desc->wb.status_error0 &
cpu_to_le16(stat_err_bits));
@@ -769,9 +802,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
*/
-static bool ice_is_non_eop(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static bool
+ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -838,8 +871,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* skb->protocol must be set before this function is called
*/
-static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+static void
+ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
@@ -909,9 +943,10 @@ checksum_fail:
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
+static void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
@@ -925,18 +960,17 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: Rx ring in play
* @skb: packet to send up
- * @vlan_tag: vlan tag for packet
+ * @vlan_tag: VLAN tag for packet
*
* This function sends the completed packet (via. skb) up the stack using
- * gro receive functions (with/without vlan tag)
+ * gro receive functions (with/without VLAN tag)
*/
-static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
- u16 vlan_tag)
+static void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK)) {
+ (vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- }
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -958,10 +992,12 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
bool failure = false;
- /* start the loop to process RX packets bounded by 'budget' */
+ /* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
+ struct ice_rx_buf *rx_buf;
struct sk_buff *skb;
+ unsigned int size;
u16 stat_err_bits;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -973,7 +1009,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
cleaned_count = 0;
}
- /* get the RX desc from RX ring based on 'next_to_clean' */
+ /* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
/* status_error_len will always be zero for unused descriptors
@@ -991,11 +1027,24 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
/* allocate (if needed) and populate skb */
- skb = ice_fetch_rx_buf(rx_ring, rx_desc);
- if (!skb)
+ if (skb)
+ ice_add_rx_frag(rx_buf, skb, size);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ rx_buf->pagecnt_bias++;
break;
+ }
+ ice_put_rx_buf(rx_ring, rx_buf);
cleaned_count++;
/* skip if it is NOP desc */
@@ -1049,17 +1098,247 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
}
/**
+ * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
+ * @port_info: port_info structure containing the current link speed
+ * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
+ * @itr: itr value to update
+ *
+ * Calculate how big of an increment should be applied to the ITR value passed
+ * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
+ * link speed.
+ *
+ * The following is a calculation derived from:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + Ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to:
+ *
+ *       wmem_default * bits_per_byte * usecs_per_sec   pkt_size + 24
+ * ITR = -------------------------------------------- * --------------
+ *                           rate                       pkt_size + 640
+ */
+static unsigned int
+ice_adjust_itr_by_size_and_speed(struct ice_port_info *port_info,
+ unsigned int avg_pkt_size,
+ unsigned int itr)
+{
+ switch (port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ itr += DIV_ROUND_UP(17 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_50GB:
+ itr += DIV_ROUND_UP(34 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_40GB:
+ itr += DIV_ROUND_UP(43 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_25GB:
+ itr += DIV_ROUND_UP(68 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_20GB:
+ itr += DIV_ROUND_UP(85 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ case ICE_AQ_LINK_SPEED_10GB:
+ /* fall through */
+ default:
+ itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24),
+ avg_pkt_size + 640);
+ break;
+ }
+
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+
+ return itr;
+}
+
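A worked instance of the adjustment above, assuming a 10 Gb/s link (coefficient 170 from the switch statement) and a 1500-byte average packet; the sketch only reproduces the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int avg_pkt_size = 1500;
	unsigned int itr = 0;	/* ICE_ITR_ADAPTIVE_BULK */

	/* 10 Gb/s coefficient is 170 in the switch above */
	itr += DIV_ROUND_UP(170 * (avg_pkt_size + 24), avg_pkt_size + 640);
	printf("ITR increment: %u usecs\n", itr);	/* prints 122, under the 250 us cap */
	return 0;
}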
+/**
+ * ice_update_itr - update the adaptive ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ */
+static void
+ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
+{
+ unsigned long next_update = jiffies;
+ unsigned int packets, bytes, itr;
+ bool container_is_rx;
+
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
+ return;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ container_is_rx = (&q_vector->rx == rc);
+ /* For Rx we want to push the delay up and default to low latency.
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = container_is_rx ?
+ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
+ ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
+
+ /* If we didn't update within up to 1 - 2 jiffies we can assume
+ * that either packets are coming in so slow there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ packets = rc->total_pkts;
+ bytes = rc->total_bytes;
+
+ if (container_is_rx) {
+ /* If Rx there are 1 to 4 packets and bytes are less than
+ * 9000 assume insufficient data to use bulk rate limiting
+ * approach unless Tx is already in bulk rate limiting. We
+ * are likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
+ itr = ICE_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size_and_speed;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & ICE_ITR_MASK) ==
+ ICE_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
+ }
+
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 56) {
+ itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= ICE_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+ /* If packet count is 128 or greater we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr >>= 1;
+ itr &= ICE_ITR_MASK;
+ if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
+ itr = ICE_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = ICE_ITR_ADAPTIVE_BULK;
+
+adjust_by_size_and_speed:
+
+ /* based on checks above packets cannot be 0 so division is safe */
+ itr = ice_adjust_itr_by_size_and_speed(q_vector->vsi->port_info,
+ bytes / packets, itr);
+
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
+ rc->total_bytes = 0;
+ rc->total_pkts = 0;
+}
+
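Setting aside the small-packet special cases, the per-interrupt packet count falls into the buckets spelled out in the comments above. A compact sketch of that bucketing, with the thresholds taken from the code:

#include <stdio.h>

static const char *itr_zone(unsigned int packets)
{
	if (packets < 56)
		return "increase ITR slightly (latency)";
	if (packets <= 112)
		return "goldilocks - keep current ITR";
	if (packets <= 256)
		return "halve ITR";
	return "bulk - recompute from size and link speed";
}

int main(void)
{
	unsigned int samples[] = { 8, 80, 200, 1000 };

	for (unsigned int i = 0; i < 4; i++)
		printf("%u packets: %s\n", samples[i], itr_zone(samples[i]));
	return 0;
}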
+/**
* ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
* @itr_idx: interrupt throttling index
- * @reg_itr: interrupt throttling value adjusted based on ITR granularity
+ * @itr: interrupt throttling value in usecs
*/
-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
+ /* The itr value is reported in microseconds, and the register value is
+ * recorded in 2 microsecond units. For this reason we only need to
+ * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
+ * granularity as a shift instead of division. The mask makes sure the
+ * ITR value is never odd so we don't accidentally write into the field
+ * prior to the ITR field.
+ */
+ itr &= ICE_ITR_MASK;
+
return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
- (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
+ (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
/**
* ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
* @vsi: the VSI associated with the q_vector
@@ -1068,10 +1347,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_ring_container *rc;
+ struct ice_ring_container *tx = &q_vector->tx;
+ struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* This will do nothing if dynamic updates are not enabled */
+ ice_update_itr(q_vector, tx);
+ ice_update_itr(q_vector, rx);
+
/* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a
* pseudo-lazy update with the following criteria.
@@ -1080,35 +1363,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
* 2. If we must reduce an ITR that is given highest priority.
* 3. We then give priority to increasing ITR based on amount.
*/
- if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
- rc = &q_vector->rx;
+ if (rx->target_itr < rx->current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
- ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
- (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
- rc = &q_vector->tx;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((tx->target_itr < tx->current_itr) ||
+ ((rx->target_itr - rx->current_itr) <
+ (tx->target_itr - tx->current_itr))) {
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
- rc = &q_vector->rx;
+ itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
+ tx->current_itr = tx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (rx->current_itr != rx->target_itr) {
/* Rx ITR needs to be increased, third priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* Still have to re-enable the interrupts */
itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
}
- if (!test_bit(__ICE_DOWN, vsi->state)) {
- int vector = vsi->hw_base_vector + q_vector->v_idx;
-
- wr32(hw, GLINT_DYN_CTL(vector), itr_val);
- }
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(q_vector->reg_idx),
+ itr_val);
}
/**
@@ -1354,7 +1638,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
@@ -1475,7 +1759,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/**
- * ice_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
* @tx_ring: ring to send buffer on
* @first: pointer to struct ice_tx_buf
*
@@ -1501,7 +1785,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
* to the encapsulated ethertype.
*/
skb->protocol = vlan_get_protocol(skb);
- goto out;
+ return 0;
}
/* if we have a HW VLAN tag being added, default to the HW one */
@@ -1523,8 +1807,7 @@ ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
first->tx_flags |= ICE_TX_FLAGS_SW_VLAN;
}
-out:
- return 0;
+ return ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
/**
@@ -1561,6 +1844,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
+ /* cppcheck-suppress unreadVariable */
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fc358ea81816..66e05032ee56 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -45,8 +45,13 @@
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
+#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
+#define ICE_TX_FLAGS_VLAN_PR_S 29
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
struct sk_buff *skb;
@@ -73,6 +78,7 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -124,11 +130,19 @@ enum ice_rx_dtype {
#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
-#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
+#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
+#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
+#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
+#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
+#define ICE_ITR_ADAPTIVE_BULK 0x0000
+
#define ICE_DFLT_INTRL 0
+#define ICE_MAX_INTRL 236
/* Legacy or Advanced Mode Queue */
#define ICE_TX_ADVANCED 0
@@ -149,6 +163,9 @@ struct ice_ring {
};
u16 q_index; /* Queue number of ring */
u32 txq_teid; /* Added Tx queue TEID */
+#ifdef CONFIG_DCB
+ u8 dcb_tc; /* Traffic class of ring */
+#endif /* CONFIG_DCB */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -173,21 +190,13 @@ struct ice_ring {
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
-enum ice_latency_range {
- ICE_LOWEST_LATENCY = 0,
- ICE_LOW_LATENCY = 1,
- ICE_BULK_LATENCY = 2,
- ICE_ULTRA_LATENCY = 3,
-};
-
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
- enum ice_latency_range latency_range;
- int itr_idx; /* index in the interrupt vector */
+ u16 itr_idx; /* index in the interrupt vector */
u16 target_itr; /* value in usecs divided by the hw->itr_gran */
u16 current_itr; /* value in usecs divided by the hw->itr_gran */
/* high bit set means dynamic ITR, rest is used to store user
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 17086d5b5c33..a862af4cbf78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
+#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
@@ -106,7 +107,7 @@ struct ice_link_status {
};
/* Different reset sources for which a disable queue AQ call has to be made in
- * order to clean the TX scheduler as a part of the reset
+ * order to clean the Tx scheduler as a part of the reset
*/
enum ice_disq_rst_src {
ICE_NO_RESET = 0,
@@ -128,11 +129,11 @@ struct ice_phy_info {
struct ice_hw_common_caps {
u32 valid_functions;
- /* TX/RX queues */
- u16 num_rxq; /* Number/Total RX queues */
- u16 rxq_first_id; /* First queue ID for RX queues */
- u16 num_txq; /* Number/Total TX queues */
- u16 txq_first_id; /* First queue ID for TX queues */
+ /* Tx/Rx queues */
+ u16 num_rxq; /* Number/Total Rx queues */
+ u16 rxq_first_id; /* First queue ID for Rx queues */
+ u16 num_txq; /* Number/Total Tx queues */
+ u16 txq_first_id; /* First queue ID for Tx queues */
/* MSI-X vectors */
u16 num_msix_vectors;
@@ -147,6 +148,8 @@ struct ice_hw_common_caps {
/* RSS related capabilities */
u16 rss_table_size; /* 512 for PFs and 64 for VFs */
u8 rss_table_entry_width; /* RSS Entry width in bits */
+
+ u8 dcb;
};
/* Function specific capabilities */
@@ -209,12 +212,17 @@ struct ice_nvm_info {
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
+#define ice_for_each_traffic_class(_i) \
+ for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+
+#define ICE_INVAL_TEID 0xFFFFFFFF
+
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
struct ice_sched_node **children;
struct ice_aqc_txsched_elem_data info;
- u32 agg_id; /* aggregator group id */
+ u32 agg_id; /* aggregator group ID */
u16 vsi_handle;
u8 in_use; /* suspended or in use */
u8 tx_sched_layer; /* Logical Layer (1-9) */
@@ -241,13 +249,12 @@ enum ice_agg_type {
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_DFLT_BW_WT 1
-/* vsi type list entry to locate corresponding vsi/ag nodes */
+/* VSI type list entry to locate corresponding VSI/ag nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
- u16 vsi_id;
};
/* driver defines the policy */
@@ -257,15 +264,70 @@ struct ice_sched_tx_policy {
u8 rdma_ena;
};
+/* CEE or IEEE 802.1Qaz ETS Configuration data */
+struct ice_dcb_ets_cfg {
+ u8 willing;
+ u8 cbs;
+ u8 maxtcs;
+ u8 prio_table[ICE_MAX_TRAFFIC_CLASS];
+ u8 tcbwtable[ICE_MAX_TRAFFIC_CLASS];
+ u8 tsatable[ICE_MAX_TRAFFIC_CLASS];
+};
+
+/* CEE or IEEE 802.1Qaz PFC Configuration data */
+struct ice_dcb_pfc_cfg {
+ u8 willing;
+ u8 mbc;
+ u8 pfccap;
+ u8 pfcena;
+};
+
+/* CEE or IEEE 802.1Qaz Application Priority data */
+struct ice_dcb_app_priority_table {
+ u16 prot_id;
+ u8 priority;
+ u8 selector;
+};
+
+#define ICE_MAX_USER_PRIORITY 8
+#define ICE_DCBX_MAX_APPS 32
+#define ICE_LLDPDU_SIZE 1500
+#define ICE_TLV_STATUS_OPER 0x1
+#define ICE_TLV_STATUS_SYNC 0x2
+#define ICE_TLV_STATUS_ERR 0x4
+#define ICE_APP_PROT_ID_FCOE 0x8906
+#define ICE_APP_PROT_ID_ISCSI 0x0cbc
+#define ICE_APP_PROT_ID_FIP 0x8914
+#define ICE_APP_SEL_ETHTYPE 0x1
+#define ICE_APP_SEL_TCPIP 0x2
+#define ICE_CEE_APP_SEL_ETHTYPE 0x0
+#define ICE_CEE_APP_SEL_TCPIP 0x1
+
+struct ice_dcbx_cfg {
+ u32 numapps;
+ u32 tlv_status; /* CEE mode TLV status */
+ struct ice_dcb_ets_cfg etscfg;
+ struct ice_dcb_ets_cfg etsrec;
+ struct ice_dcb_pfc_cfg pfc;
+ struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ u8 dcbx_mode;
+#define ICE_DCBX_MODE_CEE 0x1
+#define ICE_DCBX_MODE_IEEE 0x2
+ u8 app_mode;
+#define ICE_DCBX_APPS_NON_WILLING 0x1
+};
+
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
- struct ice_hw *hw; /* back pointer to hw instance */
+ struct ice_hw *hw; /* back pointer to HW instance */
u32 last_node_teid; /* scheduler last node info */
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
+ u8 lport;
+#define ICE_LPORT_MASK 0xff
u16 dflt_tx_vsi_rule_id;
u16 dflt_tx_vsi_num;
u16 dflt_rx_vsi_rule_id;
@@ -274,9 +336,14 @@ struct ice_port_info {
struct ice_mac_info mac;
struct ice_phy_info phy;
struct mutex sched_lock; /* protect access to TXSched tree */
- u8 lport;
-#define ICE_LPORT_MASK 0xff
- u8 is_vf;
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ /* DCBX info */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ /* LLDP/DCBX Status */
+ u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp:1;
+ u8 is_vf:1;
};
struct ice_switch_info {
@@ -320,7 +387,7 @@ struct ice_hw {
u8 pf_id; /* device profile info */
- /* TX Scheduler values */
+ /* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
u8 flattened_layers;
@@ -331,7 +398,7 @@ struct ice_hw {
struct ice_vsi_ctx *vsi_ctx[ICE_MAX_VSI];
u8 evb_veb; /* true for VEB, false for VEPA */
- u8 reset_ongoing; /* true if hw is in reset, false otherwise */
+ u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
struct ice_nvm_info nvm;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
@@ -410,6 +477,11 @@ struct ice_hw_port_stats {
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
u64 link_xoff_tx; /* lxofftxc */
+ u64 priority_xon_rx[8]; /* pxonrxc[8] */
+ u64 priority_xoff_rx[8]; /* pxoffrxc[8] */
+ u64 priority_xon_tx[8]; /* pxontxc[8] */
+ u64 priority_xoff_tx[8]; /* pxofftxc[8] */
+ u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */
u64 rx_size_64; /* prc64 */
u64 rx_size_127; /* prc127 */
u64 rx_size_255; /* prc255 */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 57155b4a59dc..a805cbdd69be 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,6 +5,37 @@
#include "ice_lib.h"
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -14,7 +45,7 @@
*/
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf = pf->vf;
@@ -104,7 +135,8 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
ICE_AQ_LINK_UP);
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
}
@@ -343,11 +375,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
}
/**
- * ice_vsi_set_pvid - Set port VLAN id for the VSI
- * @vsi: the VSI being changed
- * @vid: the VLAN id to set as a PVID
+ * ice_vsi_set_pvid_fill_ctxt - Set VSI ctxt for add PVID
+ * @ctxt: the VSI ctxt to fill
+ * @vid: the VLAN ID to set as a PVID
+ */
+static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
+{
+ ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR);
+ ctxt->info.pvid = cpu_to_le16(vid);
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_kill_pvid_fill_ctxt - Set VSI ctx for remove PVID
+ * @ctxt: the VSI ctxt to fill
+ */
+static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
+{
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
+ * @vsi: the VSI to update
+ * @vid: the VLAN ID to set as a PVID
+ * @enable: true to enable the PVID, false to disable it
*/
-static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
@@ -359,50 +421,31 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
if (!ctxt)
return -ENOMEM;
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info = vsi->info;
+ if (enable)
+ ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
+ else
+ ice_vsi_kill_pvid_fill_ctxt(ctxt);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info.pvid = ctxt->info.pvid;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info = ctxt->info;
out:
devm_kfree(dev, ctxt);
return ret;
}
/**
- * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
- * @vsi: the VSI being changed
- */
-static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
-
- if (ice_vsi_manage_vlan_stripping(vsi, false)) {
- dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
- vsi->vsi_num);
- return -ENODEV;
- }
-
- vsi->info.pvid = 0;
- return 0;
-}
-
-/**
* ice_vf_vsi_setup - Set up a VF VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
- * @vf_id: defines VF id to which this VSI connects.
+ * @vf_id: defines VF ID to which this VSI connects.
*
* Returns pointer to the successfully allocated VSI struct on success,
* otherwise returns NULL on failure.
@@ -446,8 +489,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vsi->hw_base_vector += 1;
/* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_id)
- ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+ if (vf->port_vlan_id) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
+ ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ }
eth_broadcast_addr(broadcast);
@@ -468,7 +513,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
- * We dont want to reconfigure interrupts since AVF driver doesn't
+ * We don't want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
@@ -484,6 +529,8 @@ ice_alloc_vsi_res_exit:
*/
static int ice_alloc_vf_res(struct ice_vf *vf)
{
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
int status;
/* setup VF VSI and necessary resources */
@@ -491,6 +538,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
if (status)
goto ice_alloc_vf_res_exit;
+ /* Update the number of VF queues, in case the VF requested a
+ * change in queue count
+ */
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
+ vf->num_req_qs != vf->num_vf_qs)
+ vf->num_vf_qs = vf->num_req_qs;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -548,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
+ /* Map mailbox interrupt. We put an explicit 0 here to remind us that
+ * VF admin queue interrupts will go to VF MSI-X vector 0.
+ */
+ wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -750,6 +810,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
}
/**
+ * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @rm_promisc: promisc flag request from the VF to remove or add filter
+ *
+ * This function configures VF VSI promiscuous mode, based on the VF requests,
+ * for Unicast, Multicast and VLAN
+ */
+static enum ice_status
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
+ bool rm_promisc)
+{
+ struct ice_pf *pf = vf->pf;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ if (vf->num_vlan) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ rm_promisc);
+ } else if (vf->port_vlan_id) {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ } else {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ return status;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -764,6 +865,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
int v, i;
/* If we don't have any VFs, then there is nothing to reset */
@@ -778,12 +880,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
- * queues to inform Firmware about VF reset.
- */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
- ICE_VF_RESET, v, NULL);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vsi *vsi;
+
+ vf = &pf->vf[v];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ }
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -796,9 +903,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
- struct ice_vf *vf = &pf->vf[v];
u32 reg;
+ vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & VPGEN_VFRSTAT_VFRD_M))
break;
@@ -818,8 +925,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_free_vf_res(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ ice_free_vf_res(vf);
+
+ /* Free VF queues as well, and reallocate later.
+ * If a given VF has a different number of queues
+ * configured, the request for an update will come
+ * via mailbox communication.
+ */
+ vf->num_vf_qs = 0;
+ }
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
@@ -828,8 +945,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_cleanup_and_realloc_vf(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
@@ -847,9 +971,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
- struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
bool rsd = false;
+ u8 promisc_m;
u32 reg;
int i;
@@ -871,10 +996,11 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
/* Call Disable LAN Tx queue AQ call even when queues are not
* enabled. This is needed for successful completion of VFR
*/
- ice_dis_vsi_txq(vsi->port_info, 0, NULL, NULL, ICE_VF_RESET,
- vf->vf_id, NULL);
+ ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_id, NULL);
}
+ hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
*/
@@ -900,6 +1026,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
usleep_range(10000, 20000);
+ /* Disable promiscuous modes in case they were enabled;
+ * ignore any error if the disabling process failed
+ */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (vf->port_vlan_id || vf->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
+ }
+
/* free VF resources to begin resetting the VSI state */
ice_free_vf_res(vf);
@@ -938,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
@@ -961,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
- (u8 *)&pfe, sizeof(pfe), NULL);
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
}
/**
@@ -1012,7 +1154,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, false))
+ if (!ice_reset_all_vfs(pf, true))
goto err_unroll_sriov;
goto err_unroll_intr;
@@ -1131,21 +1273,10 @@ void ice_process_vflr_event(struct ice_pf *pf)
int vf_id;
u32 reg;
- if (!test_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
+ if (!test_and_clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state) ||
!pf->num_alloc_vfs)
return;
- /* Re-enable the VFLR interrupt cause here, before looking for which
- * VF got reset. Otherwise, if another VF gets a reset while the
- * first one is being processed, that interrupt will be lost, and
- * that VF will be stuck in reset forever.
- */
- reg = rd32(hw, PFINT_OICR_ENA);
- reg |= PFINT_OICR_VFLR_M;
- wr32(hw, PFINT_OICR_ENA, reg);
- ice_flush(hw);
-
- clear_bit(__ICE_VFLR_EVENT_PENDING, pf->state);
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
@@ -1182,8 +1313,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf)
*
* send msg to VF
*/
-static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+static int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
struct ice_pf *pf;
@@ -1243,8 +1375,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
if (VF_IS_V10(&vf->vf_ver))
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
- (u8 *)&info,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
sizeof(struct virtchnl_version_info));
}
@@ -1257,15 +1389,15 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_resource *vfres = NULL;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int len = 0;
int ret;
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
@@ -1273,7 +1405,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
if (!vfres) {
- aq_ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
len = 0;
goto err;
}
@@ -1286,6 +1418,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!vsi->info.pvid)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
@@ -1336,7 +1473,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
err:
/* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
(u8 *)vfres, len);
devm_kfree(&pf->pdev->dev, vfres);
@@ -1360,15 +1497,15 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/**
* ice_find_vsi_from_id
* @pf: the pf structure to search for the VSI
- * @id: id of the VSI it is searching for
+ * @id: ID of the VSI it is searching for
*
- * searches for the VSI with the given id
+ * searches for the VSI with the given ID
*/
static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
return pf->vsi[i];
@@ -1378,9 +1515,9 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
/**
* ice_vc_isvalid_vsi_id
* @vf: pointer to the VF info
- * @vsi_id: VF relative VSI id
+ * @vsi_id: VF relative VSI ID
*
- * check for the valid VSI id
+ * check for the valid VSI ID
*/
static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
{
@@ -1395,10 +1532,10 @@ static bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
/**
* ice_vc_isvalid_q_id
* @vf: pointer to the VF info
- * @vsi_id: VSI id
- * @qid: VSI relative queue id
+ * @vsi_id: VSI ID
+ * @qid: VSI relative queue ID
*
- * check for the valid queue id
+ * check for the valid queue ID
*/
static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
{
@@ -1416,42 +1553,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
*/
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, vrk->key, NULL, 0);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, vrk->key, NULL, 0))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
NULL, 0);
}
@@ -1465,40 +1602,40 @@ error_param:
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
NULL, 0);
}
@@ -1511,25 +1648,26 @@ error_param:
*/
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1540,7 +1678,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
(u8 *)&stats, sizeof(stats));
}
@@ -1553,29 +1691,30 @@ error_param:
*/
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1584,15 +1723,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
if (ice_vsi_start_rx_rings(vsi))
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Set flag to indicate that queues are enabled */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
set_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1606,30 +1745,31 @@ error_param:
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1637,23 +1777,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
if (ice_vsi_stop_rx_rings(vsi)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop rx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
/* Clear enabled queues flag */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1666,22 +1806,30 @@ error_param:
*/
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
- enum ice_status aq_ret = 0;
unsigned long qmap;
+ u16 num_q_vectors;
int i;
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF;
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !vsi || vsi->num_q_vectors < num_q_vectors ||
+ irqmap_info->num_vectors == 0) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- for (i = 0; i < irqmap_info->num_vectors; i++) {
+ for (i = 0; i < num_q_vectors; i++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
map = &irqmap_info->vecmap[i];
vector_id = map->vector_id;
@@ -1689,40 +1837,30 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
.num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
- if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* lookout for the invalid queue index */
qmap = map->rxq_map;
+ q_vector->num_ring_rx = 0;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- struct ice_q_vector *q_vector;
-
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- q_vector = vsi->q_vectors[i];
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
}
qmap = map->txq_map;
+ q_vector->num_ring_tx = 0;
for_each_set_bit(vsi_q_id, &qmap, ICE_MAX_BASE_QS_PER_VF) {
- struct ice_q_vector *q_vector;
-
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- q_vector = vsi->q_vectors[i];
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
@@ -1733,7 +1871,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
ice_vsi_cfg_msix(vsi);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
NULL, 0);
}
@@ -1746,26 +1884,34 @@ error_param:
*/
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, qci->num_queue_pairs);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1775,7 +1921,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* copy Tx queue info from VF into VSI */
@@ -1785,13 +1931,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->rx_buf_len = qpi->rxq.databuffer_size;
if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
qpi->rxq.max_pkt_size < 64) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->max_frame = qpi->rxq.max_pkt_size;
@@ -1802,15 +1948,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
*/
vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs;
+ /* All queues of VF VSI are in TC 0 */
+ vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
+ vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
- if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
- aq_ret = 0;
- else
- aq_ret = ICE_ERR_PARAM;
+ if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
NULL, 0);
}
@@ -1845,18 +1992,18 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
- * @set: true if mac filters are being set, false otherwise
+ * @set: true if MAC filters are being set, false otherwise
*
* add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status ret;
LIST_HEAD(mac_list);
struct ice_vsi *vsi;
int mac_count = 0;
@@ -1869,19 +2016,27 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(&pf->pdev->dev,
- "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
- ret = ICE_ERR_PARAM;
+ "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ vf->vf_id);
+ /* There is no need to let the VF know about not being trusted
+ * to add more MAC addresses, so we can just return a success message.
+ */
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
for (i = 0; i < al->num_elements; i++) {
u8 *maddr = al->list[i].addr;
@@ -1893,40 +2048,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
* already added. Just continue.
*/
dev_info(&pf->pdev->dev,
- "mac %pM already set for VF %d\n",
+ "MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
- /* VF can't remove dflt_lan_addr/bcast mac */
+ /* VF can't remove dflt_lan_addr/bcast MAC */
dev_err(&pf->pdev->dev,
- "can't remove mac %pM for VF %d\n",
+ "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
- goto handle_mac_exit;
+ continue;
}
}
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
dev_err(&pf->pdev->dev,
- "invalid mac %pM provided for VF %d\n",
+ "invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
dev_err(&pf->pdev->dev,
- "can't change unicast mac for untrusted VF %d\n",
+ "can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
- /* get here if maddr is multicast or if VF can change mac */
+ /* get here if maddr is multicast or if VF can change MAC */
if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
goto handle_mac_exit;
}
mac_count++;
@@ -1934,14 +2088,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* program the updated filter list */
if (set)
- ret = ice_add_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
else
- ret = ice_remove_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
- if (ret) {
+ if (v_ret) {
dev_err(&pf->pdev->dev,
- "can't update mac filters for VF %d, error %d\n",
- vf->vf_id, ret);
+ "can't update MAC filters for VF %d, error %d\n",
+ vf->vf_id, v_ret);
} else {
if (set)
vf->num_mac += mac_count;
@@ -1952,7 +2106,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
handle_mac_exit:
ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
/**
@@ -1987,39 +2141,42 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to vf.
+ * available queue pairs via virtchnl message response to VF.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
int req_queues = vfres->num_queue_pairs;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- cur_queues = pf->num_vf_qps;
+ cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
- } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
@@ -2033,18 +2190,18 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- aq_ret, (u8 *)vfres, sizeof(*vfres));
+ v_ret, (u8 *)vfres, sizeof(*vfres));
}
/**
* ice_set_vf_port_vlan
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @vlan_id: VLAN id being set
+ * @vlan_id: VLAN ID being set
* @qos: priority setting
* @vlan_proto: VLAN protocol
*
- * program VF Port VLAN id and/or qos
+ * program VF Port VLAN ID and/or QoS
*/
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -2087,17 +2244,18 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
return ret;
}
- /* If pvid, then remove all filters on the old VLAN */
+ /* If PVID, then remove all filters on the old VLAN */
if (vsi->info.pvid)
ice_vsi_kill_vlan(vsi, (le16_to_cpu(vsi->info.pvid) &
VLAN_VID_MASK));
if (vlan_id || qos) {
- ret = ice_vsi_set_pvid(vsi, vlanprio);
+ ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
goto error_set_pvid;
} else {
- ice_vsi_kill_pvid(vsi);
+ ice_vsi_manage_pvid(vsi, 0, false);
+ vsi->info.pvid = 0;
}
if (vlan_id) {
@@ -2125,52 +2283,60 @@ error_set_pvid:
* @msg: pointer to the msg buffer
* @add_v: Add VLAN if true, otherwise delete VLAN
*
- * Process virtchnl op to add or remove programmed guest VLAN id
+ * Process virtchnl op to add or remove programmed guest VLAN ID
*/
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ bool vlan_promisc = false;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
+ int status = 0;
+ u8 promisc_m;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(&pf->pdev->dev,
- "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
- aq_ret = ICE_ERR_PARAM;
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let the VF know about not being trusted,
+ * so we can just return a success message here
+ */
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(&pf->pdev->dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
}
}
- vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vsi->info.pvid) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2178,38 +2344,94 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
dev_err(&pf->pdev->dev,
"%sable VLAN stripping failed for VSI %i\n",
add_v ? "en" : "dis", vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ vlan_promisc = true;
+
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
- if (!ice_vsi_add_vlan(vsi, vid)) {
- vf->num_vlan++;
+ if (!ice_is_vf_trusted(vf) &&
+ vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
+ dev_info(&pf->pdev->dev,
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let the VF know about not
+ * being trusted, so we can just return a success
+ * message here as well.
+ */
+ goto error_param;
+ }
+
+ if (ice_vsi_add_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid))
- if (ice_cfg_vlan_pruning(vsi, true))
- aq_ret = ICE_ERR_PARAM;
+ vf->num_vlan++;
+ /* Enable VLAN pruning when VLAN is added */
+ if (!vlan_promisc) {
+ status = ice_cfg_vlan_pruning(vsi, true, false);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
} else {
- aq_ret = ICE_ERR_PARAM;
+ /* Enable Ucast/Mcast VLAN promiscuous mode */
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ status = ice_set_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ vid, status);
+ }
}
}
} else {
- for (i = 0; i < vfl->num_elements; i++) {
+ /* In case of a non-trusted VF, the number of VLAN elements passed
+ * to the PF for removal might be greater than the number of VLAN
+ * filters programmed for that VF, so use the actual number of
+ * VLANs added earlier with the add VLAN opcode. This avoids
+ * removing a VLAN that doesn't exist, which would result in
+ * sending an erroneous failure message back to the VF
+ */
+ int num_vf_vlan;
+
+ num_vf_vlan = vf->num_vlan;
+ for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (!ice_vsi_kill_vlan(vsi, vid)) {
- vf->num_vlan--;
+ if (ice_vsi_kill_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vf->num_vlan--;
+ /* Disable VLAN pruning when removing VLAN */
+ ice_cfg_vlan_pruning(vsi, false, false);
- /* Disable VLAN pruning when removing VLAN 0 */
- if (unlikely(!vid))
- ice_cfg_vlan_pruning(vsi, false);
+ /* Disable Unicast/Multicast VLAN promiscuous mode */
+ if (vlan_promisc) {
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ ice_clear_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
}
}
}
@@ -2217,10 +2439,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
error_param:
/* send the response to the VF */
if (add_v)
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
NULL, 0);
else
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
NULL, 0);
}
@@ -2229,7 +2451,7 @@ error_param:
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * Add and program guest VLAN id
+ * Add and program guest VLAN ID
*/
static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@@ -2241,7 +2463,7 @@ static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
*
- * remove programmed guest VLAN id
+ * remove programmed guest VLAN ID
*/
static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
{
@@ -2256,22 +2478,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_manage_vlan_stripping(vsi, true))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2282,22 +2504,27 @@ error_param:
*/
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (ice_vsi_manage_vlan_stripping(vsi, false))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2333,7 +2560,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
- if (err == VIRTCHNL_ERR_PARAM)
+ if (err == VIRTCHNL_STATUS_ERR_PARAM)
err = -EPERM;
else
err = -EINVAL;
@@ -2355,7 +2582,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
error_handler:
if (err) {
- ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+ NULL, 0);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
return;
@@ -2418,7 +2646,8 @@ error_handler:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, vf_id);
- err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ err = ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
NULL, 0);
break;
}
@@ -2427,7 +2656,7 @@ error_handler:
* as it is busy with pending work.
*/
dev_info(&pf->pdev->dev,
- "PF failed to honor VF %d, opcode %d\n, error %d\n",
+ "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
}
@@ -2440,8 +2669,8 @@ error_handler:
*
* return VF configuration
*/
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi)
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2550,9 +2779,9 @@ out:
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
- * @mac: mac address
+ * @mac: MAC address
*
- * program VF mac address
+ * program VF MAC address
*/
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
@@ -2579,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
}
- /* copy mac into dflt_lan_addr and trigger a VF reset. The reset
+ /* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
* flow will use the updated dflt_lan_addr and add a MAC filter
* using ice_add_mac. Also set pf_set_mac to indicate that the PF has
* set the MAC address for this VF.
@@ -2587,7 +2816,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
netdev_info(netdev,
- "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_dis_vf(vf);
@@ -2690,7 +2919,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
/* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 01470a8ee03a..3725aea16840 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -48,10 +48,10 @@ enum ice_virtchnl_cap {
struct ice_vf {
struct ice_pf *pf;
- s16 vf_id; /* VF id in the PF space */
+ s16 vf_id; /* VF ID in the PF space */
u32 driver_caps; /* reported by VF driver */
int first_vector_idx; /* first vector index of this VF */
- struct ice_sw *vf_sw_id; /* switch id the VF VSIs connect to */
+ struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
struct virtchnl_ether_addr dflt_lan_addr;
u16 port_vlan_id;
@@ -59,10 +59,10 @@ struct ice_vf {
u8 trusted;
u16 lan_vsi_idx; /* index into PF struct */
u16 lan_vsi_num; /* ID as used by firmware */
- u64 num_mdd_events; /* number of mdd events detected */
+ u64 num_mdd_events; /* number of MDD events detected */
u64 num_inval_msgs; /* number of continuous invalid msgs */
u64 num_valid_msgs; /* number of valid msgs detected */
- unsigned long vf_caps; /* vf's adv. capabilities */
+ unsigned long vf_caps; /* VF's adv. capabilities */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
u8 link_forced;
@@ -70,6 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
+ u16 num_vf_qs; /* num of queue configured per VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
@@ -77,8 +78,8 @@ struct ice_vf {
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi);
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
@@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
- u16 vlan_id, u8 qos, __be16 vlan_proto);
-
-int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
@@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c57671068245..c645d9e648e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3158,8 +3158,8 @@ static int igb_set_eee(struct net_device *netdev,
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
"Setting EEE options are not supported with EEE disabled\n");
- return -EINVAL;
- }
+ return -EINVAL;
+ }
adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1d71ec360b1c..39f33afc479c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2480,7 +2480,7 @@ static int igb_set_features(struct net_device *netdev,
else
igb_reset(adapter);
- return 0;
+ return 1;
}
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
}
+
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -6026,7 +6029,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
return 0;
@@ -8043,7 +8046,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGB_RX_HDR_LEN)
- headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 80faccc34cda..0f5534ce27b0 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -29,9 +29,15 @@ unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
+void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+void igc_update_stats(struct igc_adapter *adapter);
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -51,6 +57,13 @@ extern char igc_driver_version[];
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
+#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
@@ -284,15 +297,50 @@ struct igc_q_vector {
struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};
+#define MAX_ETYPE_FILTER (4 - 1)
+
+enum igc_filter_match_flags {
+ IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
+ IGC_FILTER_FLAG_VLAN_TCI = 0x2,
+ IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
+ IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
+};
+
+/* RX network flow classification data structure */
+struct igc_nfc_input {
+ /* Byte layout in order, all values with MSB first:
+ * match_flags - 1 byte
+ * etype - 2 bytes
+ * vlan_tci - 2 bytes
+ */
+ u8 match_flags;
+ __be16 etype;
+ __be16 vlan_tci;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct igc_nfc_filter {
+ struct hlist_node nfc_node;
+ struct igc_nfc_input filter;
+ unsigned long cookie;
+ u16 etype_reg_index;
+ u16 sw_idx;
+ u16 action;
+};
+
struct igc_mac_addr {
u8 addr[ETH_ALEN];
u8 queue;
u8 state; /* bitmask */
};
-#define IGC_MAC_STATE_DEFAULT 0x1
-#define IGC_MAC_STATE_MODIFIED 0x2
-#define IGC_MAC_STATE_IN_USE 0x4
+#define IGC_MAC_STATE_DEFAULT 0x1
+#define IGC_MAC_STATE_IN_USE 0x2
+#define IGC_MAC_STATE_SRC_ADDR 0x4
+#define IGC_MAC_STATE_QUEUE_STEERING 0x8
+
+#define IGC_MAX_RXNFC_FILTERS 16
/* Board specific private data structure */
struct igc_adapter {
@@ -356,12 +404,22 @@ struct igc_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
u32 *shadow_vfta;
u32 rss_queues;
+ u32 rss_indir_tbl_init;
+
+ /* RX network flow classification support */
+ struct hlist_head nfc_filter_list;
+ struct hlist_head cls_flower_list;
+ unsigned int nfc_filter_count;
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
+ bool etype_bitmap[MAX_ETYPE_FILTER];
struct igc_mac_addr *mac_table;
@@ -447,6 +505,10 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
/* forward declaration */
void igc_reinit_locked(struct igc_adapter *);
+int igc_add_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input);
+int igc_erase_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 76d4991d7284..58d1109d7f3f 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
-#ifndef _IGC_BASE_H
-#define _IGC_BASE_H
+#ifndef _IGC_BASE_H_
+#define _IGC_BASE_H_
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 7d1bdcd1225a..a9a30268de59 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -310,6 +310,12 @@
IGC_RXDEXT_STATERR_CXE | \
IGC_RXDEXT_STATERR_RXE)
+#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
#define IGC_RFCTL_LEF 0x00040000
@@ -325,6 +331,10 @@
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+/* Receive Checksum Control */
+#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
/* GPY211 - I225 defines */
#define GPY_MMD_MASK 0xFFFF0000
#define GPY_MMD_SHIFT 16
@@ -390,4 +400,11 @@
#define IGC_N0_QUEUE -1
+#define IGC_MAX_MAC_HDR_LEN 127
+#define IGC_MAX_NETWORK_HDR_LEN 511
+
+#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4))
+#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define IGC_VLAPQF_QUEUE_MASK 0x03
+
#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index eff37a6c0afa..ac98f1d96892 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -2,10 +2,120 @@
/* Copyright (c) 2018 Intel Corporation */
/* ethtool support for igc */
+#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include "igc.h"
+/* forward declaration */
+struct igc_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGC_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \
+ .stat_offset = offsetof(struct igc_adapter, _stat) \
+}
+
+static const struct igc_stats igc_gstrings_stats[] = {
+ IGC_STAT("rx_packets", stats.gprc),
+ IGC_STAT("tx_packets", stats.gptc),
+ IGC_STAT("rx_bytes", stats.gorc),
+ IGC_STAT("tx_bytes", stats.gotc),
+ IGC_STAT("rx_broadcast", stats.bprc),
+ IGC_STAT("tx_broadcast", stats.bptc),
+ IGC_STAT("rx_multicast", stats.mprc),
+ IGC_STAT("tx_multicast", stats.mptc),
+ IGC_STAT("multicast", stats.mprc),
+ IGC_STAT("collisions", stats.colc),
+ IGC_STAT("rx_crc_errors", stats.crcerrs),
+ IGC_STAT("rx_no_buffer_count", stats.rnbc),
+ IGC_STAT("rx_missed_errors", stats.mpc),
+ IGC_STAT("tx_aborted_errors", stats.ecol),
+ IGC_STAT("tx_carrier_errors", stats.tncrs),
+ IGC_STAT("tx_window_errors", stats.latecol),
+ IGC_STAT("tx_abort_late_coll", stats.latecol),
+ IGC_STAT("tx_deferred_ok", stats.dc),
+ IGC_STAT("tx_single_coll_ok", stats.scc),
+ IGC_STAT("tx_multi_coll_ok", stats.mcc),
+ IGC_STAT("tx_timeout_count", tx_timeout_count),
+ IGC_STAT("rx_long_length_errors", stats.roc),
+ IGC_STAT("rx_short_length_errors", stats.ruc),
+ IGC_STAT("rx_align_errors", stats.algnerrc),
+ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGC_STAT("tx_flow_control_xon", stats.xontxc),
+ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGC_STAT("rx_long_byte_count", stats.gorc),
+ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGC_STAT("tx_smbus", stats.mgptc),
+ IGC_STAT("rx_smbus", stats.mgprc),
+ IGC_STAT("dropped_smbus", stats.mgpdc),
+ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+};
+
+#define IGC_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+}
+
+static const struct igc_stats igc_gstrings_net_stats[] = {
+ IGC_NETDEV_STAT(rx_errors),
+ IGC_NETDEV_STAT(tx_errors),
+ IGC_NETDEV_STAT(tx_dropped),
+ IGC_NETDEV_STAT(rx_length_errors),
+ IGC_NETDEV_STAT(rx_over_errors),
+ IGC_NETDEV_STAT(rx_frame_errors),
+ IGC_NETDEV_STAT(rx_fifo_errors),
+ IGC_NETDEV_STAT(tx_fifo_errors),
+ IGC_NETDEV_STAT(tx_heartbeat_errors)
+};
+
+enum igc_diagnostics_results {
+ TEST_REG = 0,
+ TEST_EEP,
+ TEST_IRQ,
+ TEST_LOOP,
+ TEST_LINK
+};
+
+static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
+ [TEST_REG] = "Register test (offline)",
+ [TEST_EEP] = "Eeprom test (offline)",
+ [TEST_IRQ] = "Interrupt test (offline)",
+ [TEST_LOOP] = "Loopback test (offline)",
+ [TEST_LINK] = "Link test (on/offline)"
+};
+
+#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN)
+
+#define IGC_GLOBAL_STATS_LEN \
+ (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats))
+#define IGC_NETDEV_STATS_LEN \
+ (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats))
+#define IGC_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igc_rx_queue_stats) / sizeof(u64))
+#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+#define IGC_QUEUE_STATS_LEN \
+ ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGC_RX_QUEUE_STATS_LEN) + \
+ (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGC_TX_QUEUE_STATS_LEN))
+#define IGC_STATS_LEN \
+ (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
@@ -545,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev,
return retval;
}
+static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igc_gstrings_test,
+ IGC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ }
+ /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igc_priv_flags_strings,
+ IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int igc_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IGC_STATS_LEN;
+ case ETH_SS_TEST:
+ return IGC_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGC_PRIV_FLAGS_STR_LEN;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void igc_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ unsigned int start;
+ struct igc_ring *ring;
+ int i, j;
+ char *p;
+
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
+ data[i] = (igc_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
+ data[i] = (igc_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ u64 restart2;
+
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ data[i] = ring->tx_stats.packets;
+ data[i + 1] = ring->tx_stats.bytes;
+ data[i + 2] = ring->tx_stats.restart_queue;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ restart2 = ring->tx_stats.restart_queue2;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ data[i + 2] += restart2;
+
+ i += IGC_TX_QUEUE_STATS_LEN;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ data[i] = ring->rx_stats.packets;
+ data[i + 1] = ring->rx_stats.bytes;
+ data[i + 2] = ring->rx_stats.drops;
+ data[i + 3] = ring->rx_stats.csum_err;
+ data[i + 4] = ring->rx_stats.alloc_failed;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ i += IGC_RX_QUEUE_STATS_LEN;
+ }
+ spin_unlock(&adapter->stats64_lock);
+}
+
static int igc_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -643,6 +874,605 @@ static int igc_set_coalesce(struct net_device *netdev,
return 0;
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct igc_nfc_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ if (rule->filter.match_flags) {
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = rule->filter.etype;
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
+ }
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_nfc_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igc */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V4_FLOW:
+ /* Fall through */
+ case AH_ESP_V4_FLOW:
+ /* Fall through */
+ case AH_V4_FLOW:
+ /* Fall through */
+ case ESP_V4_FLOW:
+ /* Fall through */
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V6_FLOW:
+ /* Fall through */
+ case AH_ESP_V6_FLOW:
+ /* Fall through */
+ case AH_V6_FLOW:
+ /* Fall through */
+ case ESP_V6_FLOW:
+ /* Fall through */
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->nfc_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = igc_get_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igc_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGC_FLAG_RSS_FIELD_IPV6_UDP)
+static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP |
+ IGC_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(IGC_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 i;
+ u32 etqf;
+ u16 etype;
+
+ /* find an empty etype filter register */
+ for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
+ if (!adapter->etype_bitmap[i])
+ break;
+ }
+ if (i == MAX_ETYPE_FILTER) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
+ return -EINVAL;
+ }
+
+ adapter->etype_bitmap[i] = true;
+
+ etqf = rd32(IGC_ETQF(i));
+ etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
+
+ etqf |= IGC_ETQF_FILTER_ENABLE;
+ etqf &= ~IGC_ETQF_ETYPE_MASK;
+ etqf |= (etype & IGC_ETQF_ETYPE_MASK);
+
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT)
+ & IGC_ETQF_QUEUE_MASK);
+ etqf |= IGC_ETQF_QUEUE_ENABLE;
+
+ wr32(IGC_ETQF(i), etqf);
+
+ input->etype_reg_index = i;
+
+ return 0;
+}
+
+static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u16 queue_index;
+ u32 vlapqf;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+ if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) &&
+ queue_index != input->action) {
+ dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
+ return -EEXIST;
+ }
+
+ vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
+
+ wr32(IGC_VLAPQF, vlapqf);
+
+ return 0;
+}
+
+int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err = -EINVAL;
+
+ if (hw->mac.type == igc_i225 &&
+ !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) {
+ dev_err(&adapter->pdev->dev,
+ "i225 doesn't support flow classification rules specifying only source addresses.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ err = igc_rxnfc_write_etype_filter(adapter, input);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.dst_addr,
+ input->action, 0);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ err = igc_rxnfc_write_vlan_prio_filter(adapter, input);
+
+ return err;
+}
+
+static void igc_clear_etype_filter_regs(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 etqf = rd32(IGC_ETQF(reg_index));
+
+ etqf &= ~IGC_ETQF_QUEUE_ENABLE;
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf &= ~IGC_ETQF_FILTER_ENABLE;
+
+ wr32(IGC_ETQF(reg_index), etqf);
+
+ adapter->etype_bitmap[reg_index] = false;
+}
+
+static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter,
+ u16 vlan_tci)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u32 vlapqf;
+
+ vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority,
+ IGC_VLAPQF_QUEUE_MASK);
+
+ wr32(IGC_VLAPQF, vlapqf);
+}
+
+int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
+ igc_clear_etype_filter_regs(adapter,
+ input->etype_reg_index);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_clear_vlan_prio_filter(adapter,
+ ntohs(input->filter.vlan_tci));
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
+
+ return 0;
+}
+
+static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input,
+ u16 sw_idx)
+{
+ struct igc_nfc_filter *rule, *parent;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && rule->sw_idx == sw_idx) {
+ if (!input)
+ err = igc_erase_filter(adapter, rule);
+
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ adapter->nfc_filter_count--;
+ }
+
+	/* If no input was given this was a delete; err should be 0 if a rule
+	 * was successfully found and removed from the list, else -EINVAL.
+	 */
+ if (!input)
+ return err;
+
+ /* initialize node */
+ INIT_HLIST_NODE(&input->nfc_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
+ else
+ hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
+
+ /* update counts */
+ adapter->nfc_filter_count++;
+
+ return 0;
+}
+
+static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct igc_nfc_filter *input, *rule;
+ int err = 0;
+
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ /* Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie >= adapter->num_rx_queues) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= IGC_MAX_RXNFC_FILTERS) {
+ dev_err(&adapter->pdev->dev, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ input->filter.etype = fsp->h_u.ether_spec.h_proto;
+ input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
+ }
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ input->filter.vlan_tci = fsp->h_ext.vlan_tci;
+ input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
+ }
+
+ input->action = fsp->ring_cookie;
+ input->sw_idx = fsp->location;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&input->filter, &rule->filter,
+ sizeof(input->filter))) {
+ err = -EEXIST;
+ dev_err(&adapter->pdev->dev,
+ "ethtool: this filter is already set\n");
+ goto err_out_w_lock;
+ }
+ }
+
+ err = igc_add_filter(adapter, input);
+ if (err)
+ goto err_out_w_lock;
+
+ igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->nfc_lock);
+ return 0;
+
+err_out_w_lock:
+ spin_unlock(&adapter->nfc_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+ err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
+static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igc_set_rss_hash_opt(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = igc_add_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = igc_del_ethtool_nfc_entry(adapter, cmd);
+ default:
+ break;
+ }
+
+ return ret;
+}
+
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -885,17 +1715,13 @@ static int igc_get_link_ksettings(struct net_device *netdev,
if (hw->mac.type == igc_i225 &&
(status & IGC_STATUS_SPEED_2500)) {
speed = SPEED_2500;
- hw_dbg("2500 Mbs, ");
} else {
speed = SPEED_1000;
- hw_dbg("1000 Mbs, ");
}
} else if (status & IGC_STATUS_SPEED_100) {
speed = SPEED_100;
- hw_dbg("100 Mbs, ");
} else {
speed = SPEED_10;
- hw_dbg("10 Mbs, ");
}
if ((status & IGC_STATUS_FD) ||
hw->phy.media_type != igc_media_type_copper)
@@ -1011,8 +1837,13 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_ringparam = igc_set_ringparam,
.get_pauseparam = igc_get_pauseparam,
.set_pauseparam = igc_set_pauseparam,
+ .get_strings = igc_get_strings,
+ .get_sset_count = igc_get_sset_count,
+ .get_ethtool_stats = igc_get_ethtool_stats,
.get_coalesce = igc_get_coalesce,
.set_coalesce = igc_set_coalesce,
+ .get_rxnfc = igc_get_rxnfc,
+ .set_rxnfc = igc_set_rxnfc,
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,
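
As an illustrative aside, not part of the patch: the IGC_STAT()/IGC_NETDEV_STAT() tables above describe each statistic as a name plus a (size, offset) pair, and igc_get_ethtool_stats() resolves each entry with a char-pointer cast against the adapter structure. The minimal userspace analogue below, with made-up struct and field names, shows the same lookup pattern.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {			/* stand-in for struct igc_adapter */
	uint64_t rx_bytes;
	uint32_t tx_timeouts;
};

struct demo_stat {			/* stand-in for struct igc_stats */
	const char *name;
	int size;
	int offset;
};

#define DEMO_STAT(_name, _field) {					\
	.name = _name,							\
	.size = sizeof(((struct demo_adapter *)0)->_field),		\
	.offset = offsetof(struct demo_adapter, _field)			\
}

static const struct demo_stat demo_stats[] = {
	DEMO_STAT("rx_bytes", rx_bytes),
	DEMO_STAT("tx_timeouts", tx_timeouts),
};

int main(void)
{
	struct demo_adapter adapter = { .rx_bytes = 1234, .tx_timeouts = 2 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const char *p = (const char *)&adapter + demo_stats[i].offset;
		uint64_t val = demo_stats[i].size == sizeof(uint64_t) ?
			       *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)val);
	}
	return 0;
}
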
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index f8d692f6aa4f..34fa0e60a780 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter)
*/
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
+ struct igc_hw *hw = &adapter->hw;
+ u32 j, num_rx_queues;
+ u32 mrqc, rxcsum;
+ u32 rss_key[10];
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ for (j = 0; j < 10; j++)
+ wr32(IGC_RSSRK(j), rss_key[j]);
+
+ num_rx_queues = adapter->rss_queues;
+
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGC_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGC_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
+ }
+ igc_write_rss_indir_tbl(adapter);
+
+ /* Disable raw packet checksumming so that RSS hash is placed in
+ * descriptor on writeback. No need to enable TCP/UDP/IP checksum
+ * offloads as they are enabled by default
+ */
+ rxcsum = rd32(IGC_RXCSUM);
+ rxcsum |= IGC_RXCSUM_PCSD;
+
+ /* Enable Receive Checksum Offload for SCTP */
+ rxcsum |= IGC_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+ wr32(IGC_RXCSUM, rxcsum);
+
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
+
+ wr32(IGC_MRQC, mrqc);
}
/**
@@ -890,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
@@ -1145,7 +1194,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGC_RX_HDR_LEN)
- headlen = eth_get_headlen(va, IGC_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
@@ -1733,12 +1782,200 @@ void igc_up(struct igc_adapter *adapter)
* igc_update_stats - Update the board statistics counters
* @adapter: board private structure
*/
-static void igc_update_stats(struct igc_adapter *adapter)
+void igc_update_stats(struct igc_adapter *adapter)
{
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+ u64 _bytes, _packets;
+ u64 bytes, packets;
+ unsigned int start;
+ u32 mpc;
+ int i;
+
+ /* Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ packets = 0;
+ bytes = 0;
+
+ rcu_read_lock();
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+ u32 rqdpc = rd32(IGC_RQDPC(i));
+
+ if (hw->mac.type >= igc_i225)
+ wr32(IGC_RQDPC(i), 0);
+
+ if (rqdpc) {
+ ring->rx_stats.drops += rqdpc;
+ net_stats->rx_fifo_errors += rqdpc;
+ }
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ _bytes = ring->rx_stats.bytes;
+ _packets = ring->rx_stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+
+ net_stats->rx_bytes = bytes;
+ net_stats->rx_packets = packets;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ _bytes = ring->tx_stats.bytes;
+ _packets = ring->tx_stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+ rcu_read_unlock();
+
+ /* read stats registers */
+ adapter->stats.crcerrs += rd32(IGC_CRCERRS);
+ adapter->stats.gprc += rd32(IGC_GPRC);
+ adapter->stats.gorc += rd32(IGC_GORCL);
+ rd32(IGC_GORCH); /* clear GORCL */
+ adapter->stats.bprc += rd32(IGC_BPRC);
+ adapter->stats.mprc += rd32(IGC_MPRC);
+ adapter->stats.roc += rd32(IGC_ROC);
+
+ adapter->stats.prc64 += rd32(IGC_PRC64);
+ adapter->stats.prc127 += rd32(IGC_PRC127);
+ adapter->stats.prc255 += rd32(IGC_PRC255);
+ adapter->stats.prc511 += rd32(IGC_PRC511);
+ adapter->stats.prc1023 += rd32(IGC_PRC1023);
+ adapter->stats.prc1522 += rd32(IGC_PRC1522);
+ adapter->stats.symerrs += rd32(IGC_SYMERRS);
+ adapter->stats.sec += rd32(IGC_SEC);
+
+ mpc = rd32(IGC_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
+ adapter->stats.scc += rd32(IGC_SCC);
+ adapter->stats.ecol += rd32(IGC_ECOL);
+ adapter->stats.mcc += rd32(IGC_MCC);
+ adapter->stats.latecol += rd32(IGC_LATECOL);
+ adapter->stats.dc += rd32(IGC_DC);
+ adapter->stats.rlec += rd32(IGC_RLEC);
+ adapter->stats.xonrxc += rd32(IGC_XONRXC);
+ adapter->stats.xontxc += rd32(IGC_XONTXC);
+ adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
+ adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
+ adapter->stats.fcruc += rd32(IGC_FCRUC);
+ adapter->stats.gptc += rd32(IGC_GPTC);
+ adapter->stats.gotc += rd32(IGC_GOTCL);
+ rd32(IGC_GOTCH); /* clear GOTCL */
+ adapter->stats.rnbc += rd32(IGC_RNBC);
+ adapter->stats.ruc += rd32(IGC_RUC);
+ adapter->stats.rfc += rd32(IGC_RFC);
+ adapter->stats.rjc += rd32(IGC_RJC);
+ adapter->stats.tor += rd32(IGC_TORH);
+ adapter->stats.tot += rd32(IGC_TOTH);
+ adapter->stats.tpr += rd32(IGC_TPR);
+
+ adapter->stats.ptc64 += rd32(IGC_PTC64);
+ adapter->stats.ptc127 += rd32(IGC_PTC127);
+ adapter->stats.ptc255 += rd32(IGC_PTC255);
+ adapter->stats.ptc511 += rd32(IGC_PTC511);
+ adapter->stats.ptc1023 += rd32(IGC_PTC1023);
+ adapter->stats.ptc1522 += rd32(IGC_PTC1522);
+
+ adapter->stats.mptc += rd32(IGC_MPTC);
+ adapter->stats.bptc += rd32(IGC_BPTC);
+
+ adapter->stats.tpt += rd32(IGC_TPT);
+ adapter->stats.colc += rd32(IGC_COLC);
+
+ adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
+
+ adapter->stats.tsctc += rd32(IGC_TSCTC);
+ adapter->stats.tsctfc += rd32(IGC_TSCTFC);
+
+ adapter->stats.iac += rd32(IGC_IAC);
+ adapter->stats.icrxoc += rd32(IGC_ICRXOC);
+ adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
+ adapter->stats.icrxatc += rd32(IGC_ICRXATC);
+ adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
+ adapter->stats.ictxatc += rd32(IGC_ICTXATC);
+ adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
+ adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
+ adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);
+
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = adapter->stats.mprc;
+ net_stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ net_stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ net_stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ net_stats->rx_crc_errors = adapter->stats.crcerrs;
+ net_stats->rx_frame_errors = adapter->stats.algnerrc;
+ net_stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ net_stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ net_stats->tx_aborted_errors = adapter->stats.ecol;
+ net_stats->tx_window_errors = adapter->stats.latecol;
+ net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Management Stats */
+ adapter->stats.mgptc += rd32(IGC_MGTPTC);
+ adapter->stats.mgprc += rd32(IGC_MGTPRC);
+ adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}
static void igc_nfc_filter_exit(struct igc_adapter *adapter)
{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_erase_filter(adapter, rule);
+
+ hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+ igc_erase_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
}
/**
@@ -1885,6 +2122,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev)
return &netdev->stats;
}
+static netdev_features_t igc_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+	/* Since there is no support for separate Rx/Tx vlan accel
+	 * enable/disable, make sure Tx flag is always in same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ return features;
+}
+
+static int igc_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* Add VLAN support */
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
+ return 0;
+
+ if (!(features & NETIF_F_NTUPLE)) {
+ struct hlist_node *node2;
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &adapter->nfc_filter_list, nfc_node) {
+ igc_erase_filter(adapter, rule);
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ }
+ spin_unlock(&adapter->nfc_lock);
+ adapter->nfc_filter_count = 0;
+ }
+
+ netdev->features = features;
+
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ else
+ igc_reset(adapter);
+
+ return 1;
+}
+
+static netdev_features_t
+igc_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPv4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
/**
* igc_configure - configure the hardware for RX and TX
* @adapter: private board structure
@@ -1901,6 +2218,7 @@ static void igc_configure(struct igc_adapter *adapter)
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
+ igc_nfc_filter_restore(adapter);
igc_configure_tx(adapter);
igc_configure_rx(adapter);
@@ -1962,6 +2280,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter)
igc_rar_set_index(adapter, 0);
}
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, it should be possible to only
+ * override the other configurations, for example the queue to steer
+ * traffic.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr' directing matching traffic to 'queue'.
+ * 'flags' indicates what kind of match is made; the match is by
+ * default on the destination address. If matching by source address
+ * is desired, the flag IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, flags))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igc_add_mac_filter_flags(adapter, addr, queue,
+ IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
+/* Remove a MAC filter for 'addr' directing matching traffic to
+ * 'queue'. 'flags' indicates what kind of match needs to be
+ * removed; the match is by default on the destination address. If
+ * matching by source address is to be removed, the flag
+ * IGC_MAC_STATE_SRC_ADDR can be used.
+ */
+static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if ((adapter->mac_table[i].state & flags) != flags)
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igc_del_mac_filter_flags(adapter, addr, queue,
+ IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -3429,6 +3868,9 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
+ .ndo_fix_features = igc_fix_features,
+ .ndo_set_features = igc_set_features,
+ .ndo_features_check = igc_features_check,
};
/* PCIe configuration access */
@@ -3658,6 +4100,9 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
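
As an illustrative aside, not part of the patch: the default RSS indirection table fill added to igc_setup_mrqc() above spreads the redirection-table entries evenly across the active Rx queues using (j * num_rx_queues) / IGC_RETA_SIZE. The sketch below reproduces only that arithmetic with stand-in values (4 queues, a 128-entry table); the real table size comes from IGC_RETA_SIZE in the driver.

#include <stdio.h>

#define DEMO_RETA_SIZE	128	/* stand-in for IGC_RETA_SIZE */

int main(void)
{
	unsigned int num_rx_queues = 4, j;	/* arbitrary example */

	for (j = 0; j < DEMO_RETA_SIZE; j++) {
		unsigned int q = (j * num_rx_queues) / DEMO_RETA_SIZE;

		if (j % 32 == 0)	/* print one sample per queue block */
			printf("reta[%3u] -> queue %u\n", j, q);
	}
	return 0;
}
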
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 5afe7a8d3faf..50d7c04dccf5 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -80,8 +80,23 @@
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+/* RSS registers */
+#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
+
+/* Filtering Registers */
+#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+/* ETQF register bit definitions */
+#define IGC_ETQF_FILTER_ENABLE BIT(26)
+#define IGC_ETQF_QUEUE_ENABLE BIT(31)
+#define IGC_ETQF_QUEUE_SHIFT 16
+#define IGC_ETQF_QUEUE_MASK 0x00070000
+#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
+/* RSS Random Key - RW Array */
+#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4))
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
@@ -101,6 +116,7 @@
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
+#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
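
As an illustrative aside, not part of the patch: the IGC_ETQF_* bit definitions added above compose an EtherType steering filter the way igc_rxnfc_write_etype_filter() in igc_ethtool.c does, with the EtherType in the low 16 bits, the target queue in bits 18:16, and separate filter/queue enable bits. The sketch below shows the resulting register value for an arbitrary EtherType (0x88F7) and queue; the constants are adapted from the hunk above (BIT() expanded) so it builds standalone.

#include <stdint.h>
#include <stdio.h>

#define IGC_ETQF_FILTER_ENABLE	(1u << 26)
#define IGC_ETQF_QUEUE_ENABLE	(1u << 31)
#define IGC_ETQF_QUEUE_SHIFT	16
#define IGC_ETQF_QUEUE_MASK	0x00070000
#define IGC_ETQF_ETYPE_MASK	0x0000FFFF

int main(void)
{
	uint32_t etqf = 0;
	uint16_t etype = 0x88F7;	/* arbitrary example EtherType */
	uint32_t queue = 1;		/* arbitrary example queue */

	etqf |= IGC_ETQF_FILTER_ENABLE;
	etqf |= etype & IGC_ETQF_ETYPE_MASK;
	etqf |= (queue << IGC_ETQF_QUEUE_SHIFT) & IGC_ETQF_QUEUE_MASK;
	etqf |= IGC_ETQF_QUEUE_ENABLE;

	printf("ETQF = 0x%08x\n", etqf);	/* prints 0x840188f7 */
	return 0;
}
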
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 99e23cf6a73a..57fd9ee6de66 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1800,7 +1800,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
* we need the header to contain the greater of either ETH_HLEN or
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
- pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE);
+ pull_len = eth_get_headlen(skb->dev, va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -8297,7 +8297,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
}
@@ -8478,8 +8478,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
@@ -8509,7 +8508,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
break;
/* fall through */
default:
- return fallback(dev, skb, sb_dev);
+ return netdev_pick_tx(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -9791,7 +9790,7 @@ static int ixgbe_set_features(struct net_device *netdev,
NETIF_F_HW_VLAN_CTAG_FILTER))
ixgbe_set_rx_mode(netdev);
- return 0;
+ return 1;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 49e23afa05a2..d189ed247665 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -895,7 +895,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
/* Determine available headroom for copy */
headlen = size;
if (headlen > IXGBEVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(xdp->data, IXGBEVF_RX_HDR_SIZE);
+ headlen = eth_get_headlen(skb->dev, xdp->data,
+ IXGBEVF_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
memcpy(__skb_put(skb, headlen), xdp->data,
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index d29104de0d53..cda641ef89af 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -478,7 +478,7 @@ static int xrx200_probe(struct platform_device *pdev)
}
mac = of_get_mac_address(np);
- if (mac && is_valid_ether_addr(mac))
+ if (!IS_ERR(mac))
ether_addr_copy(net_dev->dev_addr, mac);
else
eth_hw_addr_random(net_dev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 292a668ce88e..07e254fc96ef 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2749,7 +2749,7 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
}
mac_addr = of_get_mac_address(pnp);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(ppd.mac_addr, mac_addr, ETH_ALEN);
mv643xx_eth_property(pnp, "tx-queue-size", ppd.tx_queue_size);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c0a3718b2e2a..8186135883ed 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2467,7 +2467,7 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
- if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
else
@@ -3385,6 +3385,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
phylink_set(mask, 1000baseX_Full);
}
if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
@@ -4475,15 +4476,14 @@ static int mvneta_probe(struct platform_device *pdev)
int err;
int cpu;
- dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
+ dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
+ txq_number, rxq_number);
if (!dev)
return -ENOMEM;
dev->irq = irq_of_parse_and_map(dn, 0);
- if (dev->irq == 0) {
- err = -EINVAL;
- goto err_free_netdev;
- }
+ if (dev->irq == 0)
+ return -EINVAL;
phy_mode = of_get_phy_mode(dn);
if (phy_mode < 0) {
@@ -4563,7 +4563,7 @@ static int mvneta_probe(struct platform_device *pdev)
}
dt_mac_addr = of_get_mac_address(dn);
- if (dt_mac_addr) {
+ if (!IS_ERR(dt_mac_addr)) {
mac_from = "device tree";
memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
} else {
@@ -4704,8 +4704,6 @@ err_free_phylink:
phylink_destroy(pp->phylink);
err_free_irq:
irq_dispose_mapping(dev->irq);
-err_free_netdev:
- free_netdev(dev);
return err;
}
@@ -4722,7 +4720,6 @@ static int mvneta_remove(struct platform_device *pdev)
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
phylink_destroy(pp->phylink);
- free_netdev(dev);
if (pp->bm_priv) {
mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ff0f4c503f53..6171270a016c 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -14,6 +14,7 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phylink.h>
+#include <net/flow_offload.h>
/* Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port))
@@ -101,6 +102,7 @@
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3)
#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
@@ -123,13 +125,18 @@
#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f)
#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_PORT_MASK (0xff << 8)
+#define MVPP22_CLS_C2_TCAM_INV 0x1b24
+#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31)
#define MVPP22_CLS_C2_HIT_CTR 0x1b50
#define MVPP22_CLS_C2_ACT 0x1b60
#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
#define MVPP22_CLS_C2_ACT_FWD(act) (((act) & 0x7) << 13)
#define MVPP22_CLS_C2_ACT_QHIGH(act) (((act) & 0x3) << 11)
#define MVPP22_CLS_C2_ACT_QLOW(act) (((act) & 0x3) << 9)
+#define MVPP22_CLS_C2_ACT_COLOR(act) ((act) & 0x7)
#define MVPP22_CLS_C2_ATTR0 0x1b64
#define MVPP22_CLS_C2_ATTR0_QHIGH(qh) (((qh) & 0x1f) << 24)
#define MVPP22_CLS_C2_ATTR0_QHIGH_MASK 0x1f
@@ -610,6 +617,12 @@
#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+#define MVPP2_N_PRS_FLOWS 52
+#define MVPP2_N_RFS_ENTRIES_PER_FLOW 4
+
+/* There are 7 supported high-level flows */
+#define MVPP2_N_RFS_RULES (MVPP2_N_RFS_ENTRIES_PER_FLOW * 7)
+
/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES 32
@@ -710,6 +723,7 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
/* Definitions */
+struct mvpp2_dbgfs_entries;
/* Shared Packet Processor resources */
struct mvpp2 {
@@ -771,6 +785,9 @@ struct mvpp2 {
/* Debugfs root entry */
struct dentry *dbgfs_dir;
+
+ /* Debugfs entries private data */
+ struct mvpp2_dbgfs_entries *dbgfs_entries;
};
struct mvpp2_pcpu_stats {
@@ -802,6 +819,37 @@ struct mvpp2_queue_vector {
struct cpumask *mask;
};
+/* Internal representation of a Flow Steering rule */
+struct mvpp2_rfs_rule {
+	/* Rule location inside the flow */
+ int loc;
+
+ /* Flow type, such as TCP_V4_FLOW, IP6_FLOW, etc. */
+ int flow_type;
+
+ /* Index of the C2 TCAM entry handling this rule */
+ int c2_index;
+
+	/* Header fields that need to be extracted to match this flow */
+ u16 hek_fields;
+
+	/* CLS engine: only c2 is supported for now. */
+ u8 engine;
+
+ /* TCAM key and mask for C2-based steering. These fields should be
+ * encapsulated in a union should we add more engines.
+ */
+ u64 c2_tcam;
+ u64 c2_tcam_mask;
+
+ struct flow_rule *flow;
+};
+
+struct mvpp2_ethtool_fs {
+ struct mvpp2_rfs_rule rule;
+ struct ethtool_rxnfc rxnfc;
+};
+
struct mvpp2_port {
u8 id;
@@ -873,6 +921,10 @@ struct mvpp2_port {
/* RSS indirection table */
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
+
+ /* List of steering rules active on that port */
+ struct mvpp2_ethtool_fs *rfs_rules[MVPP2_N_RFS_RULES];
+ int n_rfs_rules;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index efdb7a656835..d046f7a1dcf5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,302 +22,302 @@
} \
}
-static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, Not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, Not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv4 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv4 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* TCP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* TCP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_TCP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, not fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
MVPP22_CLS_HEK_IP6_5T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, not fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* UDP over IPv6 flows, fragmented, no vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
/* UDP over IPv6 flows, fragmented, with vlan tag */
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
MVPP2_PRS_RI_L4_UDP,
MVPP2_PRS_IP_MASK),
/* IPv4 flows, no vlan tag */
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
MVPP22_CLS_HEK_IP4_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv4 flows, with vlan tag */
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP4_OTHER,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, no vlan tag */
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
MVPP22_CLS_HEK_IP6_2T,
MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
/* IPv6 flows, with vlan tag */
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK),
/* Non IP flow, no vlan tag */
- MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
0,
MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
- MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
MVPP22_CLS_HEK_OPT_VLAN,
0, 0),
};
@@ -344,9 +344,9 @@ static void mvpp2_cls_flow_write(struct mvpp2 *priv,
struct mvpp2_cls_flow_entry *fe)
{
mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
@@ -429,12 +429,6 @@ static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}
-static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
-{
- fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
- fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
-}
-
static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
bool is_last)
{
@@ -454,9 +448,22 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
+static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
+ u32 port)
+{
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
+}
+
+static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
+ u8 lu_type)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+}
+
/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
- struct mvpp2_cls_flow *flow)
+ const struct mvpp2_cls_flow *flow)
{
mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
flow->prs_ri.ri_mask);
@@ -464,7 +471,7 @@ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
- struct mvpp2_cls_flow *flow)
+ const struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_lookup_entry le;
@@ -477,7 +484,7 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
/* We point to the first lookup in the sequence for the flow, that is
* the C2 lookup.
*/
- le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -485,21 +492,111 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
mvpp2_cls_lookup_write(priv, &le);
}
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ if (c2->valid)
+ val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
+ else
+ val |= MVPP22_CLS_C2_TCAM_INV_BIT;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
+}
+
+static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
+{
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
+ case TCP_V4_FLOW:
+ return MVPP22_FLOW_TCP4;
+ case TCP_V6_FLOW:
+ return MVPP22_FLOW_TCP6;
+ case UDP_V4_FLOW:
+ return MVPP22_FLOW_UDP4;
+ case UDP_V6_FLOW:
+ return MVPP22_FLOW_UDP6;
+ case IPV4_FLOW:
+ return MVPP22_FLOW_IP4;
+ case IPV6_FLOW:
+ return MVPP22_FLOW_IP6;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
+{
+ return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
+}
+
/* Initialize the flow table entries for the given flow */
-static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+static void mvpp2_cls_flow_init(struct mvpp2 *priv,
+ const struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_flow_entry fe;
- int i;
+ int i, pri = 0;
+
+ /* Assign default values to all entries in the flow */
+ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
+ i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = i;
+ mvpp2_cls_flow_pri_set(&fe, pri++);
+
+ if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
+ mvpp2_cls_flow_last_set(&fe, 1);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
- /* C2 lookup */
- memset(&fe, 0, sizeof(fe));
- fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+ /* RSS config C2 lookup */
+ mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
+ &fe);
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_last_set(&fe, 0);
- mvpp2_cls_flow_pri_set(&fe, 0);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+ mvpp2_cls_flow_lu_type_set(&fe, MVPP22_FLOW_ETHERNET);
/* Add all ports */
for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -509,22 +606,19 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
/* C3Hx lookups */
for (i = 0; i < MVPP2_MAX_PORTS; i++) {
- memset(&fe, 0, sizeof(fe));
- fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+ mvpp2_cls_flow_read(priv,
+ MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
+ &fe);
+ /* Set a default engine. Will be overwritten when setting the
+ * real HEK parameters
+ */
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_pri_set(&fe, i + 1);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
mvpp2_cls_flow_port_add(&fe, BIT(i));
mvpp2_cls_flow_write(priv, &fe);
}
-
- /* Update the last entry */
- mvpp2_cls_flow_last_set(&fe, 1);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
-
- mvpp2_cls_flow_write(priv, &fe);
}
/* Adds a field to the Header Extracted Key generation parameters */
@@ -555,6 +649,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ field_id = MVPP22_CLS_FIELD_MAC_DA;
+ break;
case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN;
break;
@@ -586,9 +683,29 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
return 0;
}
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+/* Returns the size, in bits, of the corresponding HEK field */
+static int mvpp2_cls_hek_field_size(u32 field)
{
- if (flow >= MVPP2_N_FLOWS)
+ switch (field) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ return 48;
+ case MVPP22_CLS_HEK_OPT_IP4SA:
+ case MVPP22_CLS_HEK_OPT_IP4DA:
+ return 32;
+ case MVPP22_CLS_HEK_OPT_IP6SA:
+ case MVPP22_CLS_HEK_OPT_IP6DA:
+ return 128;
+ case MVPP22_CLS_HEK_OPT_L4SIP:
+ case MVPP22_CLS_HEK_OPT_L4DIP:
+ return 16;
+ default:
+ return -1;
+ }
+}
+
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+{
+ if (flow >= MVPP2_N_PRS_FLOWS)
return NULL;
return &cls_flows[flow];
@@ -608,21 +725,17 @@ struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
u16 requested_opts)
{
+ const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *flow;
int i, engine, flow_index;
u16 hash_opts;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for_each_cls_flow_id_with_type(i, flow_type) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return -EINVAL;
- if (flow->flow_type != flow_type)
- continue;
-
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
- flow->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -697,21 +810,17 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
*/
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
+ const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *flow;
int i, flow_index;
u16 hash_opts = 0;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for_each_cls_flow_id_with_type(i, flow_type) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return 0;
- if (flow->flow_type != flow_type)
- continue;
-
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
- flow->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -723,10 +832,10 @@ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
- struct mvpp2_cls_flow *flow;
+ const struct mvpp2_cls_flow *flow;
int i;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
break;
@@ -737,47 +846,6 @@ static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
}
}
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
- struct mvpp2_cls_c2_entry *c2)
-{
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
-
- /* Write TCAM */
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
-}
-
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
- struct mvpp2_cls_c2_entry *c2)
-{
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
-
- c2->index = index;
-
- c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
- c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
- c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
- c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
- c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
-
- c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
-
- c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
- c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
- c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
- c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
-}
-
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
@@ -791,6 +859,10 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_FLOW_ETHERNET);
+
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -809,6 +881,8 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
+ c2.valid = true;
+
mvpp2_cls_c2_write(port->priv, &c2);
}
@@ -817,6 +891,7 @@ void mvpp2_cls_init(struct mvpp2 *priv)
{
struct mvpp2_cls_lookup_entry le;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_c2_entry c2;
int index;
/* Enable classifier */
@@ -840,6 +915,14 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_lookup_write(priv, &le);
}
+ /* Clear C2 TCAM engine table */
+ memset(&c2, 0, sizeof(c2));
+ c2.valid = false;
+ for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
+ c2.index = index;
+ mvpp2_cls_c2_write(priv, &c2);
+ }
+
mvpp2_cls_port_init_flows(priv);
}
@@ -902,16 +985,28 @@ static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
mvpp2_cls_c2_write(port->priv, &c2);
}
-void mvpp22_rss_enable(struct mvpp2_port *port)
+void mvpp22_port_rss_enable(struct mvpp2_port *port)
{
mvpp2_rss_port_c2_enable(port);
}
-void mvpp22_rss_disable(struct mvpp2_port *port)
+void mvpp22_port_rss_disable(struct mvpp2_port *port)
{
mvpp2_rss_port_c2_disable(port);
}
+static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
+{
+ struct mvpp2_cls_c2_entry c2;
+
+ mvpp2_cls_c2_read(port->priv, entry, &c2);
+
+ /* Clear the port map so that the entry doesn't match anymore */
+ c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+}
+
/* Set CPU queue number for oversize packets */
void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
@@ -928,6 +1023,289 @@ void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
+static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ struct flow_action_entry *act;
+ struct mvpp2_cls_c2_entry c2;
+ u8 qh, ql, pmap;
+ int index;
+
+ memset(&c2, 0, sizeof(c2));
+
+ index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
+ if (index < 0)
+ return -EINVAL;
+ c2.index = index;
+
+ act = &rule->flow->action.entries[0];
+
+ rule->c2_index = c2.index;
+
+ c2.tcam[0] = (rule->c2_tcam & 0xffff) |
+ ((rule->c2_tcam_mask & 0xffff) << 16);
+ c2.tcam[1] = ((rule->c2_tcam >> 16) & 0xffff) |
+ (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
+ c2.tcam[2] = ((rule->c2_tcam >> 32) & 0xffff) |
+ (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
+ c2.tcam[3] = ((rule->c2_tcam >> 48) & 0xffff) |
+ (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
+
+ pmap = BIT(port->id);
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
+
+ if (act->id == FLOW_ACTION_DROP) {
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
+ } else {
+ /* We want to keep the default color derived from the Header
+ * Parser drop entries, for VLAN and MAC filtering. This will
+ * assign a default color of Green or Red, and we want matches
+ * with a non-drop action to keep that color.
+ */
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
+
+ /* Mark packet as "forwarded to software", needed for RSS */
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
+
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
+
+ qh = ((act->queue.index + port->first_rxq) >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
+ ql = (act->queue.index + port->first_rxq) & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
+
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
+ }
+
+ c2.valid = true;
+
+ mvpp2_cls_c2_write(port->priv, &c2);
+
+ return 0;
+}
+
+static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ return mvpp2_port_c2_tcam_rule_add(port, rule);
+}
+
+static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2_cls_flow_entry fe;
+ int index, i;
+
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+ mvpp2_cls_flow_read(port->priv, index, &fe);
+ mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
+ mvpp2_cls_flow_write(port->priv, &fe);
+ }
+
+ if (rule->c2_index >= 0)
+ mvpp22_port_c2_lookup_disable(port, rule->c2_index);
+
+ return 0;
+}
+
+static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
+ struct mvpp2_rfs_rule *rule)
+{
+ const struct mvpp2_cls_flow *flow;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_cls_flow_entry fe;
+ int index, ret, i;
+
+ if (rule->engine != MVPP22_CLS_ENGINE_C2)
+ return -EOPNOTSUPP;
+
+ ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
+ if (ret)
+ return ret;
+
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
+ flow = mvpp2_cls_flow_get(i);
+ if (!flow)
+ return 0;
+
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
+
+ mvpp2_cls_flow_read(priv, index, &fe);
+ mvpp2_cls_flow_eng_set(&fe, rule->engine);
+ mvpp2_cls_flow_port_id_sel(&fe, true);
+ mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
+ mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
+ mvpp2_cls_flow_port_add(&fe, 0xf);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ return 0;
+}
+
+static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
+{
+ struct flow_rule *flow = rule->flow;
+ int offs = 64;
+
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(flow, &match);
+ if (match.mask->src) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
+ offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
+
+ rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
+ }
+
+ if (match.mask->dst) {
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
+ offs -= mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
+
+ rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
+ }
+ }
+
+ if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
+{
+ struct flow_rule *flow = rule->flow;
+ struct flow_action_entry *act;
+
+ act = &flow->action.entries[0];
+ if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
+ return -EOPNOTSUPP;
+
+ /* For now, only use the C2 engine which has a HEK size limited to 64
+ * bits for TCAM matching.
+ */
+ rule->engine = MVPP22_CLS_ENGINE_C2;
+
+ if (mvpp2_cls_c2_build_match(rule))
+ return -EINVAL;
+
+ return 0;
+}
+
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+ struct ethtool_rxnfc *rxnfc)
+{
+ struct mvpp2_ethtool_fs *efs;
+
+ if (rxnfc->fs.location >= MVPP2_N_RFS_RULES)
+ return -EINVAL;
+
+ efs = port->rfs_rules[rxnfc->fs.location];
+ if (!efs)
+ return -ENOENT;
+
+ memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
+
+ return 0;
+}
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info)
+{
+ struct ethtool_rx_flow_spec_input input = {};
+ struct ethtool_rx_flow_rule *ethtool_rule;
+ struct mvpp2_ethtool_fs *efs, *old_efs;
+ int ret = 0;
+
+ if (info->fs.location >= 4 ||
+ info->fs.location < 0)
+ return -EINVAL;
+
+ efs = kzalloc(sizeof(*efs), GFP_KERNEL);
+ if (!efs)
+ return -ENOMEM;
+
+ input.fs = &info->fs;
+
+ ethtool_rule = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(ethtool_rule)) {
+ ret = PTR_ERR(ethtool_rule);
+ goto clean_rule;
+ }
+
+ efs->rule.flow = ethtool_rule->rule;
+ efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
+
+ ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+
+ efs->rule.loc = info->fs.location;
+
+ /* Replace an already existing rule */
+ if (port->rfs_rules[efs->rule.loc]) {
+ old_efs = port->rfs_rules[efs->rule.loc];
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+ kfree(old_efs);
+ port->n_rfs_rules--;
+ }
+
+ ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
+ if (ret)
+ goto clean_eth_rule;
+
+ memcpy(&efs->rxnfc, info, sizeof(*info));
+ port->rfs_rules[efs->rule.loc] = efs;
+ port->n_rfs_rules++;
+
+ return ret;
+
+clean_eth_rule:
+ ethtool_rx_flow_rule_destroy(ethtool_rule);
+clean_rule:
+ kfree(efs);
+ return ret;
+}
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info)
+{
+ struct mvpp2_ethtool_fs *efs;
+ int ret;
+
+ efs = port->rfs_rules[info->fs.location];
+ if (!efs)
+ return -EINVAL;
+
+ /* Remove the rule from the engines. */
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
+ if (ret)
+ return ret;
+
+ port->n_rfs_rules--;
+ port->rfs_rules[info->fs.location] = NULL;
+ kfree(efs);
+
+ return 0;
+}
+
static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
{
int nrxqs, cpu, cpus = num_possible_cpus();
@@ -965,19 +1343,22 @@ void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
u16 hash_opts = 0;
+ u32 flow_type;
- switch (info->flow_type) {
- case TCP_V4_FLOW:
- case UDP_V4_FLOW:
- case TCP_V6_FLOW:
- case UDP_V6_FLOW:
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+ switch (flow_type) {
+ case MVPP22_FLOW_TCP4:
+ case MVPP22_FLOW_UDP4:
+ case MVPP22_FLOW_TCP6:
+ case MVPP22_FLOW_UDP6:
if (info->data & RXH_L4_B_0_1)
hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
if (info->data & RXH_L4_B_2_3)
hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
/* Fallthrough */
- case IPV4_FLOW:
- case IPV6_FLOW:
+ case MVPP22_FLOW_IP4:
+ case MVPP22_FLOW_IP6:
if (info->data & RXH_L2DA)
hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
if (info->data & RXH_VLAN)
@@ -994,15 +1375,18 @@ int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
default: return -EOPNOTSUPP;
}
- return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
+ return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
}
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
{
unsigned long hash_opts;
+ u32 flow_type;
int i;
- hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
+
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
info->data = 0;
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
@@ -1037,7 +1421,7 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-void mvpp22_rss_port_init(struct mvpp2_port *port)
+void mvpp22_port_rss_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
@@ -1065,10 +1449,10 @@ void mvpp22_rss_port_init(struct mvpp2_port *port)
mvpp22_rss_fill_table(port, port->id);
/* Configure default flows */
- mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
- mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
- mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
- mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
- mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
}
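
For reference, a minimal sketch (not part of the patch) of how the 64-bit C2 TCAM key and mask end up split across 16-bit data/mask half-words, mirroring the packing done in mvpp2_port_c2_tcam_rule_add() above; the pack_c2_tcam() helper and the port-80 values are purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Pack 16 bits of key in the low half-word and 16 bits of mask in the
 * high half-word of each TCAM data word, as the C2 rule insertion does.
 */
static void pack_c2_tcam(uint64_t key, uint64_t mask, uint32_t tcam[4])
{
	int i;

	for (i = 0; i < 4; i++)
		tcam[i] = ((key >> (16 * i)) & 0xffff) |
			  (((mask >> (16 * i)) & 0xffff) << 16);
}

int main(void)
{
	uint32_t tcam[4];

	/* Hypothetical rule: match L4 destination port 80 placed in the
	 * top 16 bits of the HEK (offset chosen for illustration only).
	 */
	pack_c2_tcam((uint64_t)80 << 48, (uint64_t)0xffff << 48, tcam);

	printf("tcam[3] = 0x%08x\n", tcam[3]);	/* 0xffff0050 */
	return 0;
}
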
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 089f05f29891..56b617375a65 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -71,14 +71,6 @@ enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_L4DIP = 0x1e,
};
-enum mvpp2_cls_flow_seq {
- MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
- MVPP2_CLS_FLOW_SEQ_FIRST1,
- MVPP2_CLS_FLOW_SEQ_FIRST2,
- MVPP2_CLS_FLOW_SEQ_LAST,
- MVPP2_CLS_FLOW_SEQ_MIDDLE
-};
-
/* Classifier C2 engine constants */
#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
@@ -100,39 +92,62 @@ enum mvpp22_cls_c2_fwd_action {
MVPP22_C2_FWD_HW_LOW_LAT_LOCK,
};
+enum mvpp22_cls_c2_color_action {
+ MVPP22_C2_COL_NO_UPD = 0,
+ MVPP22_C2_COL_NO_UPD_LOCK,
+ MVPP22_C2_COL_GREEN,
+ MVPP22_C2_COL_GREEN_LOCK,
+ MVPP22_C2_COL_YELLOW,
+ MVPP22_C2_COL_YELLOW_LOCK,
+ MVPP22_C2_COL_RED, /* Drop */
+ MVPP22_C2_COL_RED_LOCK, /* Drop */
+};
+
#define MVPP2_CLS_C2_TCAM_WORDS 5
#define MVPP2_CLS_C2_ATTR_WORDS 5
struct mvpp2_cls_c2_entry {
u32 index;
+ /* TCAM lookup key */
u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ /* Actions to perform upon TCAM match */
u32 act;
+ /* Attributes relative to the actions to perform */
u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+ /* Entry validity */
+ u8 valid;
};
+#define MVPP22_FLOW_ETHER_BIT BIT(0)
+#define MVPP22_FLOW_IP4_BIT BIT(1)
+#define MVPP22_FLOW_IP6_BIT BIT(2)
+#define MVPP22_FLOW_TCP_BIT BIT(3)
+#define MVPP22_FLOW_UDP_BIT BIT(4)
+
+#define MVPP22_FLOW_TCP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_TCP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_TCP_BIT)
+#define MVPP22_FLOW_UDP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_UDP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT | MVPP22_FLOW_UDP_BIT)
+#define MVPP22_FLOW_IP4 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP4_BIT)
+#define MVPP22_FLOW_IP6 (MVPP22_FLOW_ETHER_BIT | MVPP22_FLOW_IP6_BIT)
+#define MVPP22_FLOW_ETHERNET (MVPP22_FLOW_ETHER_BIT)
+
/* Classifier C2 engine entries */
-#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
-#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+#define MVPP22_CLS_C2_N_ENTRIES 256
-/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
- *
- * The first performs a lookup using the C2 TCAM engine, to tag the
- * packet for software forwarding (needed for RSS), enable or disable RSS, and
- * assign the default rx queue.
- *
- * The second configures the hash generation, by specifying which fields of the
- * packet header are used to generate the hash, and specifies the relevant hash
- * engine to use.
+/* Number of per-port dedicated entries in the C2 TCAM */
+#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
+
+/* Each port has one range per flow type + one entry controlling the global RSS
+ * setting and the default rx queue
*/
-#define MVPP22_RSS_FLOW_C2_OFFS 0
-#define MVPP22_RSS_FLOW_HASH_OFFS 1
-#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
+#define MVPP22_CLS_C2_PORT_FIRST(p) ((p) * MVPP22_CLS_C2_PORT_RANGE)
+#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST((p) + 1) - 1)
-#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
- MVPP22_RSS_FLOW_C2_OFFS)
-#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
- MVPP22_RSS_FLOW_HASH_OFFS)
-#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+#define MVPP22_CLS_C2_PORT_FLOW_FIRST(p) (MVPP22_CLS_C2_PORT_FIRST(p))
+
+#define MVPP22_CLS_C2_RFS_LOC(p, loc) (MVPP22_CLS_C2_PORT_FLOW_FIRST(p) + (loc))
/* Packet flow ID */
enum mvpp2_prs_flow {
@@ -162,6 +177,11 @@ enum mvpp2_prs_flow {
MVPP2_FL_LAST,
};
+/* LU Type defined for all engines, and specified in the flow table */
+#define MVPP2_CLS_LU_TYPE_MASK 0x3f
+
+#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
+
struct mvpp2_cls_flow {
/* The L2-L4 traffic flow type */
int flow_type;
@@ -176,12 +196,48 @@ struct mvpp2_cls_flow {
struct mvpp2_prs_result_info prs_ri;
};
-#define MVPP2_N_FLOWS 52
+#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1 + 16)
+#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
+
+#define MVPP2_CLS_FLT_C2_RFS(port, id, rfs_n) (MVPP2_CLS_FLT_FIRST(id) + \
+ ((port) * MVPP2_MAX_PORTS) + \
+ (rfs_n))
+
+#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_C2_RFS(MVPP2_MAX_PORTS, id, 0))
+#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + 1 + (port))
+#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
+
+/* Iterate on each classifier flow id. Sets 'i' to be the index of the first
+ * entry in the cls_flows table for each different flow_id.
+ * This relies on entries having the same flow_id in the cls_flows table being
+ * contiguous.
+ */
+#define for_each_cls_flow_id(i) \
+ for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \
+ if ((i) > 0 && \
+ cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \
+ continue; \
+ else
+
+/* Iterate on each classifier flow that has a given flow_type. Sets 'i' to be
+ * the index of the first entry in the cls_flow table for each different flow_id
+ * that has the given flow_type. This allows operating on all flows that
+ * match a given ethtool flow type.
+ */
+#define for_each_cls_flow_id_with_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if (cls_flows[(i)].flow_type != (type)) \
+ continue; \
+ else
+
+#define for_each_cls_flow_id_containing_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if ((cls_flows[(i)].flow_type & (type)) != (type)) \
+ continue; \
+ else
-#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
-#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
-#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
- (port) + 1)
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -194,11 +250,10 @@ struct mvpp2_cls_lookup_entry {
};
void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+void mvpp22_port_rss_init(struct mvpp2_port *port);
-void mvpp22_rss_port_init(struct mvpp2_port *port);
-
-void mvpp22_rss_enable(struct mvpp2_port *port);
-void mvpp22_rss_disable(struct mvpp2_port *port);
+void mvpp22_port_rss_enable(struct mvpp2_port *port);
+void mvpp22_port_rss_disable(struct mvpp2_port *port);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
@@ -213,7 +268,7 @@ int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
@@ -230,4 +285,13 @@ u32 mvpp2_cls_c2_hit_count(struct mvpp2 *priv, int c2_index);
void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
struct mvpp2_cls_c2_entry *c2);
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
+ struct ethtool_rxnfc *rxnfc);
+
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info);
+
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
+ struct ethtool_rxnfc *info);
+
#endif
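
A small sketch (illustrative only, not part of the patch) of the flow-type containment test that for_each_cls_flow_id_containing_type() relies on: since flow types are now composed of MVPP22_FLOW_*_BIT flags, a TCP-over-IPv4 flow "contains" the plain IPv4 type. Constants are renamed locally so the sketch stands alone.

#include <stdbool.h>
#include <stdio.h>

#define FLOW_ETHER_BIT	(1 << 0)
#define FLOW_IP4_BIT	(1 << 1)
#define FLOW_TCP_BIT	(1 << 3)

#define FLOW_TCP4	(FLOW_ETHER_BIT | FLOW_IP4_BIT | FLOW_TCP_BIT)
#define FLOW_IP4	(FLOW_ETHER_BIT | FLOW_IP4_BIT)

/* The containment test: every bit of the requested type must be set in
 * the table entry's flow_type, which is what the iterator macro checks.
 */
static bool flow_contains_type(int flow_type, int type)
{
	return (flow_type & type) == type;
}

int main(void)
{
	printf("TCP4 contains IP4: %d\n", flow_contains_type(FLOW_TCP4, FLOW_IP4));	/* 1 */
	printf("IP4 contains TCP4: %d\n", flow_contains_type(FLOW_IP4, FLOW_TCP4));	/* 0 */
	return 0;
}
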
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index f9744a61e5dd..0ee39ea47b6b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -18,22 +18,48 @@ struct mvpp2_dbgfs_prs_entry {
struct mvpp2 *priv;
};
+struct mvpp2_dbgfs_c2_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
struct mvpp2_dbgfs_flow_entry {
int flow;
struct mvpp2 *priv;
};
+struct mvpp2_dbgfs_flow_tbl_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
struct mvpp2_dbgfs_port_flow_entry {
struct mvpp2_port *port;
struct mvpp2_dbgfs_flow_entry *dbg_fe;
};
+struct mvpp2_dbgfs_entries {
+ /* Entries for Header Parser debug info */
+ struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
+
+ /* Entries for Classifier C2 engine debug info */
+ struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
+
+ /* Entries for Classifier Flow Table debug info */
+ struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
+
+ /* Entries for Classifier flows debug info */
+ struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
+
+ /* Entries for per-port flows debug info */
+ struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
+};
+
static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_entry *entry = s->private;
- int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+ struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
- u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
seq_printf(s, "%u\n", hits);
@@ -58,7 +84,7 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_flow_entry *entry = s->private;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
const char *flow_name;
f = mvpp2_cls_flow_get(entry->flow);
@@ -93,30 +119,12 @@ static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
-
- kfree(flow_entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
- .open = mvpp2_dbgfs_flow_type_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_flow_type_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_entry *entry = s->private;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ const struct mvpp2_cls_flow *f;
f = mvpp2_cls_flow_get(entry->flow);
if (!f)
@@ -134,7 +142,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
int flow_index;
u16 hash_opts;
@@ -142,7 +150,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
if (!f)
return -EINVAL;
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -153,42 +161,21 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
- inode->i_private);
-}
-
-static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
- struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
-
- kfree(flow_entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
- .open = mvpp2_dbgfs_port_flow_hash_opt_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_port_flow_hash_opt_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
int flow_index, engine;
f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
if (!f)
return -EINVAL;
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -203,11 +190,10 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
u32 hits;
- hits = mvpp2_cls_c2_hit_count(port->priv,
- MVPP22_CLS_C2_RSS_ENTRY(port->id));
+ hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
seq_printf(s, "%u\n", hits);
@@ -218,11 +204,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql;
- mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
@@ -239,11 +225,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
struct mvpp2_cls_c2_entry c2;
int enabled;
- mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
@@ -456,25 +442,7 @@ static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_prs_entry *entry = seq->private;
-
- kfree(entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
- .open = mvpp2_dbgfs_prs_valid_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_prs_valid_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct mvpp2_port *port,
@@ -487,10 +455,7 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
if (IS_ERR(port_dir))
return PTR_ERR(port_dir);
- /* This will be freed by 'hash_opts' release op */
- port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
- if (!port_entry)
- return -ENOMEM;
+ port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
port_entry->port = port;
port_entry->dbg_fe = entry;
@@ -518,17 +483,11 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (!flow_entry_dir)
return -ENOMEM;
- /* This will be freed by 'type' release op */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ entry = &priv->dbgfs_entries->flow_entries[flow];
entry->flow = flow;
entry->priv = priv;
- debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
- &mvpp2_dbgfs_flow_flt_hits_fops);
-
debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
&mvpp2_dbgfs_flow_dec_hits_fops);
@@ -545,6 +504,7 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (ret)
return ret;
}
+
return 0;
}
@@ -557,7 +517,7 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
if (!flow_dir)
return -ENOMEM;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
if (ret)
return ret;
@@ -582,10 +542,7 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
if (!prs_entry_dir)
return -ENOMEM;
- /* The 'valid' entry's ops will free that */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ entry = &priv->dbgfs_entries->prs_entries[tid];
entry->tid = tid;
entry->priv = priv;
@@ -630,6 +587,98 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
return 0;
}
+static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_c2_entry *entry;
+ struct dentry *c2_entry_dir;
+ char c2_entry_name[10];
+
+ if (id >= MVPP22_CLS_C2_N_ENTRIES)
+ return -EINVAL;
+
+ sprintf(c2_entry_name, "%03d", id);
+
+ c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
+ if (!c2_entry_dir)
+ return -ENOMEM;
+
+ entry = &priv->dbgfs_entries->c2_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_flow_tbl_entry *entry;
+ struct dentry *flow_tbl_entry_dir;
+ char flow_tbl_entry_name[10];
+
+ if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
+ return -EINVAL;
+
+ sprintf(flow_tbl_entry_name, "%03d", id);
+
+ flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
+ if (!flow_tbl_entry_dir)
+ return -ENOMEM;
+
+ entry = &priv->dbgfs_entries->flt_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
+ int i, ret;
+
+ cls_dir = debugfs_create_dir("classifier", parent);
+ if (!cls_dir)
+ return -ENOMEM;
+
+ c2_dir = debugfs_create_dir("c2", cls_dir);
+ if (!c2_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
+ ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
+ if (!flow_tbl_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
+ ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct mvpp2_port *port)
{
@@ -648,21 +697,14 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
debugfs_create_file("vid_filter", 0444, port_dir, port,
&mvpp2_dbgfs_port_vid_fops);
- debugfs_create_file("c2_hits", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_hits_fops);
-
- debugfs_create_file("default_rxq", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_rxq_fops);
-
- debugfs_create_file("rss_enable", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_enable_fops);
-
return 0;
}
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
+
+ kfree(priv->dbgfs_entries);
}
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
@@ -682,11 +724,18 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
return;
priv->dbgfs_dir = mvpp2_dir;
+ priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
+ if (!priv->dbgfs_entries)
+ goto err;
ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
if (ret)
goto err;
+ ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
for (i = 0; i < priv->port_count; i++) {
ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
if (ret)
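
The hand-rolled open/release file operations above are replaced by DEFINE_SHOW_ATTRIBUTE(), and the per-entry allocations move into a single dbgfs_entries structure freed in mvpp2_dbgfs_cleanup(). A minimal sketch of the pattern (illustrative, not part of the patch; "example" is a made-up name):

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *s, void *unused)
{
	seq_puts(s, "hello\n");
	return 0;
}
/* Generates example_open(), example_fops, ... around example_show() */
DEFINE_SHOW_ATTRIBUTE(example);

static void example_init(struct dentry *parent, void *priv)
{
	/* 'priv' must now stay valid until debugfs_remove_recursive(),
	 * since there is no per-file release() kfree()ing it anymore.
	 */
	debugfs_create_file("example", 0444, parent, priv, &example_fops);
}
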
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 25fbed2b8d94..56d43d9b43ef 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3741,9 +3741,9 @@ static int mvpp2_set_features(struct net_device *dev,
if (changed & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- mvpp22_rss_enable(port);
+ mvpp22_port_rss_enable(port);
else
- mvpp22_rss_disable(port);
+ mvpp22_port_rss_disable(port);
}
return 0;
@@ -3937,7 +3937,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rules)
{
struct mvpp2_port *port = netdev_priv(dev);
- int ret = 0;
+ int ret = 0, i, loc = 0;
if (!mvpp22_rss_is_supported())
return -EOPNOTSUPP;
@@ -3949,6 +3949,18 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
case ETHTOOL_GRXRINGS:
info->data = port->nrxqs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = port->n_rfs_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = mvpp2_ethtool_cls_rule_get(port, info);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ for (i = 0; i < MVPP2_N_RFS_RULES; i++) {
+ if (port->rfs_rules[i])
+ rules[loc++] = i;
+ }
+ break;
default:
return -ENOTSUPP;
}
@@ -3969,6 +3981,12 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
case ETHTOOL_SRXFH:
ret = mvpp2_ethtool_rxfh_set(port, info);
break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = mvpp2_ethtool_cls_rule_ins(port, info);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = mvpp2_ethtool_cls_rule_del(port, info);
+ break;
default:
return -EOPNOTSUPP;
}
@@ -4301,7 +4319,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_port_config(port);
if (mvpp22_rss_is_supported())
- mvpp22_rss_port_init(port);
+ mvpp22_port_rss_init(port);
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4848,6 +4866,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct device_node *port_node = to_of_node(port_fwnode);
+ netdev_features_t features;
struct net_device *dev;
struct resource *res;
struct phylink *phylink;
@@ -4856,7 +4875,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
unsigned long flags = 0;
bool has_tx_irqs;
u32 id;
- int features;
int phy_mode;
int err, i;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 35f2142aac5e..ce037e8530fa 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1461,7 +1461,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (mac_addr && is_valid_ether_addr(mac_addr)) {
+ if (!IS_ERR_OR_NULL(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
} else {
/* try reading the mac address, if set by the bootloader */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 49486c10ef81..9d070cca3e9e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4804,7 +4804,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 2) from internal registers set by bootloader
*/
iap = of_get_mac_address(hw->pdev->dev.of_node);
- if (iap)
+ if (!IS_ERR(iap))
memcpy(dev->dev_addr, iap, ETH_ALEN);
else
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 549d36497b8c..f9fbb3ffa3a6 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
return 0;
@@ -2027,7 +2028,7 @@ static int __init mtk_init(struct net_device *dev)
const char *mac_addr;
mac_addr = of_get_mac_address(mac->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(dev->dev_addr, mac_addr);
/* If the mac address is invalid, use random mac address */
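
The MAC-address hunks above adapt to of_get_mac_address() now returning an ERR_PTR() on failure instead of NULL. A hedged sketch of the resulting caller pattern (example_set_mac() is a hypothetical helper; pxa168 uses IS_ERR_OR_NULL() instead of IS_ERR()):

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_set_mac(struct net_device *dev, struct device_node *np)
{
	const char *mac = of_get_mac_address(np);

	if (!IS_ERR(mac))
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);	/* fall back to a random MAC */
}
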
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index ff8057ed97ee..8491db57b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -26,6 +26,7 @@ config MLX4_EN_DCB
config MLX4_CORE
tristate
depends on PCI
+ select NET_DEVLINK
default n
config MLX4_DEBUG
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2cbd2bd7c67c..36a92b19e613 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
- return fallback(dev, skb, NULL) % rings_p_up;
+ return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
@@ -1043,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
tx_info->nr_bytes,
- skb->xmit_more);
+ netdev_xmit_more());
real_size = (real_size / 16) & 0x3f;
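
skb->xmit_more is gone; the hint now lives in a per-cpu variable queried with netdev_xmit_more(), as the mtk and mlx4 hunks show. A hedged sketch of the doorbell-batching idiom inside a transmit handler (example_xmit() is hypothetical):

#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... descriptors for skb are posted to the hardware ring here ... */

	/* Ring the doorbell only when the queue is being stopped or when
	 * the stack has no further packets queued behind this one.
	 */
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
	    !netdev_xmit_more())
		netdev_dbg(dev, "ring doorbell\n");

	return NETDEV_TX_OK;
}
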
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8137454e2534..630f15977f09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
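
The mlx4 hunks above follow the core change that drops the select_queue_fallback_t argument from ndo_select_queue(); drivers call netdev_pick_tx() directly. A minimal sketch of the new shape (example_select_queue() and the modulo of 4 are illustrative only):

#include <linux/netdevice.h>

static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				struct net_device *sb_dev)
{
	/* Default kernel queue selection, then constrain to 4 queues */
	return netdev_pick_tx(dev, skb, sb_dev) % 4;
}
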
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6debffb8336b..9aca8086ee01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -5,6 +5,7 @@
config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on PCI
+ select NET_DEVLINK
imply PTP_1588_CLOCK
imply VXLAN
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 1a16f6d73cbc..243368dc23db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -22,7 +22,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
#
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
- en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o
+ en_selftest.o en/port.o en/monitor_stats.o en/reporter_tx.o \
+ en/params.o
#
# Netdev extra
@@ -35,7 +36,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o lib/port_tu
#
# Core extra
#
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
@@ -57,5 +58,3 @@ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
-
-CFLAGS_tracepoint.o := -I$(src)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 9008e17126db..549f962cd86e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -57,15 +57,16 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
int node)
{
struct mlx5_priv *priv = &dev->priv;
+ struct device *device = dev->device;
int original_node;
void *cpu_handle;
mutex_lock(&priv->alloc_mutex);
- original_node = dev_to_node(&dev->pdev->dev);
- set_dev_node(&dev->pdev->dev, node);
- cpu_handle = dma_alloc_coherent(&dev->pdev->dev, size, dma_handle,
+ original_node = dev_to_node(device);
+ set_dev_node(device, node);
+ cpu_handle = dma_alloc_coherent(device, size, dma_handle,
GFP_KERNEL);
- set_dev_node(&dev->pdev->dev, original_node);
+ set_dev_node(device, original_node);
mutex_unlock(&priv->alloc_mutex);
return cpu_handle;
}
@@ -110,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+ dma_free_coherent(dev->device, buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
@@ -139,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
- dma_free_coherent(&dev->pdev->dev, frag_sz,
+ dma_free_coherent(dev->device, frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@@ -152,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+ dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@@ -168,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
- dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+ dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@@ -274,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ dma_free_coherent(dev->device, PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c087d1014b09..937ba4bcb056 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1346,7 +1346,7 @@ static void set_wqname(struct mlx5_core_dev *dev)
struct mlx5_cmd *cmd = &dev->cmd;
snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
- dev_name(&dev->pdev->dev));
+ dev_name(dev->device));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
@@ -1851,7 +1851,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = dev->device;
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
@@ -1882,7 +1882,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = dev->device;
dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
@@ -1901,14 +1901,13 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
memset(cmd, 0, sizeof(*cmd));
cmd_if_rev = cmdif_rev(dev);
if (cmd_if_rev != CMD_IF_REV) {
- dev_err(&dev->pdev->dev,
- "Driver cmdif rev(%d) differs from firmware's(%d)\n",
- CMD_IF_REV, cmd_if_rev);
+ mlx5_core_err(dev,
+ "Driver cmdif rev(%d) differs from firmware's(%d)\n",
+ CMD_IF_REV, cmd_if_rev);
return -EINVAL;
}
- cmd->pool = dma_pool_create("mlx5_cmd", &dev->pdev->dev, size, align,
- 0);
+ cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
if (!cmd->pool)
return -ENOMEM;
@@ -1920,14 +1919,14 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->log_sz = cmd_l >> 4 & 0xf;
cmd->log_stride = cmd_l & 0xf;
if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
- dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
- 1 << cmd->log_sz);
+ mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
+ 1 << cmd->log_sz);
err = -EINVAL;
goto err_free_page;
}
if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
- dev_err(&dev->pdev->dev, "command queue size overflow\n");
+ mlx5_core_err(dev, "command queue size overflow\n");
err = -EINVAL;
goto err_free_page;
}
@@ -1938,8 +1937,8 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
if (cmd->cmdif_rev > CMD_IF_REV) {
- dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
- CMD_IF_REV, cmd->cmdif_rev);
+ mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
+ CMD_IF_REV, cmd->cmdif_rev);
err = -EOPNOTSUPP;
goto err_free_page;
}
@@ -1955,7 +1954,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
if (cmd_l & 0xfff) {
- dev_err(&dev->pdev->dev, "invalid command queue address\n");
+ mlx5_core_err(dev, "invalid command queue address\n");
err = -ENOMEM;
goto err_free_page;
}
@@ -1975,7 +1974,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
set_wqname(dev);
cmd->wq = create_singlethread_workqueue(cmd->wq_name);
if (!cmd->wq) {
- dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
+ mlx5_core_err(dev, "failed to create command workqueue\n");
err = -ENOMEM;
goto err_cache;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
index 83f90e9aff45..3038be575923 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -47,7 +47,7 @@ TRACE_EVENT(mlx5_fw,
TP_ARGS(tracer, trace_timestamp, lost, event_id, msg),
TP_STRUCT__entry(
- __string(dev_name, dev_name(&tracer->dev->pdev->dev))
+ __string(dev_name, dev_name(tracer->dev->device))
__field(u64, trace_timestamp)
__field(bool, lost)
__field(u8, event_id)
@@ -55,7 +55,8 @@ TRACE_EVENT(mlx5_fw,
),
TP_fast_assign(
- __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev));
+ __assign_str(dev_name,
+ dev_name(tracer->dev->device));
__entry->trace_timestamp = trace_timestamp;
__entry->lost = lost;
__entry->event_id = event_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index d3eaf2ceaa39..3a183d690e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -240,8 +240,8 @@ struct mlx5e_params {
bool rx_cqe_compress_def;
struct net_dim_cq_moder rx_cq_moderation;
struct net_dim_cq_moder tx_cq_moderation;
+ bool tunneled_offload_en;
bool lro_en;
- u32 lro_wqe_sz;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
bool scatter_fcs_en;
@@ -410,14 +410,17 @@ struct mlx5e_xdp_info_fifo {
struct mlx5e_xdp_wqe_info {
u8 num_wqebbs;
- u8 num_ds;
+ u8 num_pkts;
};
struct mlx5e_xdp_mpwqe {
/* Current MPWQE session */
struct mlx5e_tx_wqe *wqe;
u8 ds_count;
+ u8 pkt_count;
u8 max_ds_count;
+ u8 complete;
+ u8 inline_on;
};
struct mlx5e_xdpsq;
@@ -429,7 +432,6 @@ struct mlx5e_xdpsq {
/* dirtied @completion */
u32 xdpi_fifo_cc;
u16 cc;
- bool redirect_flush;
/* dirtied @xmit */
u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
@@ -462,10 +464,10 @@ struct mlx5e_xdpsq {
struct mlx5e_icosq {
/* data path */
+ u16 cc;
+ u16 pc;
- /* dirtied @xmit */
- u16 pc ____cacheline_aligned_in_smp;
-
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
struct mlx5e_cq cq;
/* write@xmit, read@completion */
@@ -532,7 +534,8 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
enum mlx5e_rq_flag {
- MLX5E_RQ_FLAG_XDP_XMIT = BIT(0),
+ MLX5E_RQ_FLAG_XDP_XMIT,
+ MLX5E_RQ_FLAG_XDP_REDIRECT,
};
struct mlx5e_rq_frag_info {
@@ -563,8 +566,10 @@ struct mlx5e_rq {
struct mlx5e_mpw_info *info;
mlx5e_fp_skb_from_cqe_mpwrq skb_from_cqe_mpwrq;
u16 num_strides;
+ u16 actual_wq_head;
u8 log_stride_sz;
- bool umr_in_progress;
+ u8 umr_in_progress;
+ u8 umr_last_bulk;
} mpwqe;
};
struct {
@@ -769,12 +774,12 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi);
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
@@ -886,6 +891,53 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
+static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, swp) &&
+ MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
+}
+
+struct mlx5e_swp_spec {
+ __be16 l3_proto;
+ u8 l4_proto;
+ u8 is_tun;
+ __be16 tun_l3_proto;
+ u8 tun_l4_proto;
+};
+
+static inline void
+mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
+ struct mlx5e_swp_spec *swp_spec)
+{
+ /* SWP offsets are in 2-bytes words */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+ if (swp_spec->l4_proto) {
+ eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
+ if (swp_spec->l4_proto == IPPROTO_UDP)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ }
+
+ if (swp_spec->is_tun) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ }
+ switch (swp_spec->tun_l4_proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ /* fall through */
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ }
+}
+
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
@@ -930,7 +982,7 @@ void mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc,
*/
wmb();
- mlx5_write64((__be32 *)ctrl, uar_map, NULL);
+ mlx5_write64((__be32 *)ctrl, uar_map);
}
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
@@ -1042,6 +1094,7 @@ mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *prof
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_rss_params *rss_params,
struct mlx5e_params *params,
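The mlx5e_swp_spec/mlx5e_set_eseg_swp() helper added to en.h above centralizes the software-parser (SWP) offset programming; the GENEVE and IPsec hunks further down in this patch fill the same structure. A short sketch of the non-tunnel branch, assuming a hypothetical caller that already knows its L3/L4 protocols (example_fill_swp is not from the diff):

static void example_fill_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
        struct mlx5e_swp_spec swp_spec = {};

        /* is_tun stays false, so the helper reuses the outer network offset
         * for the inner L3 fields and takes the inner L4 protocol from
         * tun_l4_proto, mirroring the transport-mode IPsec usage below.
         */
        swp_spec.l3_proto = skb->protocol;
        swp_spec.l4_proto = IPPROTO_UDP;
        swp_spec.tun_l3_proto = skb->protocol;
        swp_spec.tun_l4_proto = IPPROTO_UDP;

        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}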
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
new file mode 100644
index 000000000000..d3744bffbae3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "en/params.h"
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
+{
+ u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ u32 frag_sz;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
+
+ if (params->xdp_prog && frag_sz < PAGE_SIZE)
+ frag_sz = PAGE_SIZE;
+
+ return frag_sz;
+}
+
+u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
+{
+ u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+
+ return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
+}
+
+bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+
+ return !params->lro_en && frag_sz <= PAGE_SIZE;
+}
+
+#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
+ MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
+bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
+ s8 signed_log_num_strides_param;
+ u8 log_num_strides;
+
+ if (!mlx5e_rx_is_linear_skb(params))
+ return false;
+
+ if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
+ return false;
+
+ if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
+ return true;
+
+ log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
+ signed_log_num_strides_param =
+ (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
+
+ return signed_log_num_strides_param >= 0;
+}
+
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
+{
+ u8 log_pkts_per_wqe = mlx5e_mpwqe_log_pkts_per_wqe(params);
+
+ /* Numbers are unsigned, don't subtract to avoid underflow. */
+ if (params->log_rq_mtu_frames <
+ log_pkts_per_wqe + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+
+ return params->log_rq_mtu_frames - log_pkts_per_wqe;
+}
+
+u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
+ return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
+
+ return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
+}
+
+u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return MLX5_MPWRQ_LOG_WQE_SZ -
+ mlx5e_mpwqe_get_log_stride_size(mdev, params);
+}
+
+u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u16 linear_rq_headroom = params->xdp_prog ?
+ XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
+ bool is_linear_skb;
+
+ linear_rq_headroom += NET_IP_ALIGN;
+
+ is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
+ mlx5e_rx_is_linear_skb(params) :
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
+
+ return is_linear_skb ? linear_rq_headroom : 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
new file mode 100644
index 000000000000..b106a0236f36
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_PARAMS_H__
+#define __MLX5_EN_PARAMS_H__
+
+#include "en.h"
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params);
+u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params);
+bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params);
+bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+
+#endif /* __MLX5_EN_PARAMS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 4ab0d030b544..633b117eb13e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -167,23 +167,23 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
/**
- * update_buffer_lossy()
- * max_mtu: netdev's max_mtu
- * pfc_en: <input> current pfc configuration
- * buffer: <input> current prio to buffer mapping
- * xoff: <input> xoff value
- * port_buffer: <output> port receive buffer configuration
- * change: <output>
+ * update_buffer_lossy - Update buffer configuration based on pfc
+ * @max_mtu: netdev's max_mtu
+ * @pfc_en: <input> current pfc configuration
+ * @buffer: <input> current prio to buffer mapping
+ * @xoff: <input> xoff value
+ * @port_buffer: <output> port receive buffer configuration
+ * @change: <output>
*
- * Update buffer configuration based on pfc configuraiton and priority
- * to buffer mapping.
- * Buffer's lossy bit is changed to:
- * lossless if there is at least one PFC enabled priority mapped to this buffer
- * lossy if all priorities mapped to this buffer are PFC disabled
+ * Update buffer configuration based on pfc configuration and
+ * priority to buffer mapping.
+ * Buffer's lossy bit is changed to:
+ * lossless if there is at least one PFC enabled priority
+ * mapped to this buffer; lossy if all priorities mapped to
+ * this buffer are PFC disabled
*
- * Return:
- * Return 0 if no error.
- * Set change to true if buffer configuration is modified.
+ * @return: 0 if no error,
+ * sets change to true if buffer configuration was modified.
*/
static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index eec07b34b4ad..fe5d4d7f15ed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -74,7 +74,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
if (ret)
return ret;
- if (mlx5_lag_is_multipath(mdev) && !rt->rt_gateway)
+ if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
return -ENETUNREACH;
#else
return -EOPNOTSUPP;
@@ -100,7 +100,7 @@ static const char *mlx5e_netdev_kind(struct net_device *dev)
if (dev->rtnl_link_ops)
return dev->rtnl_link_ops->kind;
else
- return "";
+ return "unknown";
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
@@ -640,8 +640,10 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
headers_c, headers_v);
} else {
netdev_warn(priv->netdev,
- "decapsulation offload is not supported for %s net device (%d)\n",
- mlx5e_netdev_kind(filter_dev), tunnel_type);
+ "decapsulation offload is not supported for %s (kind: \"%s\")\n",
+ netdev_name(filter_dev),
+ mlx5e_netdev_kind(filter_dev));
+
return -EOPNOTSUPP;
}
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index cad34d6f5f45..eb8ef78e5626 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -105,7 +105,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
if (unlikely(err))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
- rq->xdpsq.redirect_flush = true;
+ __set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
mlx5e_page_dma_unmap(rq, di);
rq->stats->xdp_redirect++;
return true;
@@ -125,6 +125,7 @@ xdp_abort:
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_xdpsq_stats *stats = sq->stats;
struct mlx5_wq_cyc *wq = &sq->wq;
u8 wqebbs;
u16 pi;
@@ -132,7 +133,9 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
mlx5e_xdpsq_fetch_wqe(sq, &session->wqe);
prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ session->pkt_count = 0;
+ session->complete = 0;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -151,6 +154,10 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
MLX5E_XDP_MPW_MAX_WQEBBS);
session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs;
+
+ mlx5e_xdp_update_inline_state(sq);
+
+ stats->mpwqe++;
}
static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
@@ -167,7 +174,7 @@ static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS);
- wi->num_ds = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT;
+ wi->num_pkts = session->pkt_count;
sq->pc += wi->num_wqebbs;
@@ -182,11 +189,9 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
- dma_addr_t dma_addr = xdpi->dma_addr;
struct xdp_frame *xdpf = xdpi->xdpf;
- unsigned int dma_len = xdpf->len;
- if (unlikely(sq->hw_mtu < dma_len)) {
+ if (unlikely(sq->hw_mtu < xdpf->len)) {
stats->err++;
return false;
}
@@ -203,9 +208,10 @@ static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
mlx5e_xdp_mpwqe_session_start(sq);
}
- mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len);
+ mlx5e_xdp_mpwqe_add_dseg(sq, xdpi, stats);
- if (unlikely(session->ds_count == session->max_ds_count))
+ if (unlikely(session->complete ||
+ session->ds_count == session->max_ds_count))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
@@ -269,12 +275,33 @@ static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *
return true;
}
+static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
+ struct mlx5e_xdp_wqe_info *wi,
+ struct mlx5e_rq *rq,
+ bool recycle)
+{
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ u16 i;
+
+ for (i = 0; i < wi->num_pkts; i++) {
+ struct mlx5e_xdp_info xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
+
+ if (rq) {
+ /* XDP_TX */
+ mlx5e_page_release(rq, &xdpi.di, recycle);
+ } else {
+ /* XDP_REDIRECT */
+ dma_unmap_single(sq->pdev, xdpi.dma_addr,
+ xdpi.xdpf->len, DMA_TO_DEVICE);
+ xdp_return_frame(xdpi.xdpf);
+ }
+ }
+}
+
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
- struct mlx5e_xdp_info_fifo *xdpi_fifo;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
- bool is_redirect;
u16 sqcc;
int i;
@@ -287,9 +314,6 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
if (!cqe)
return false;
- is_redirect = !rq;
- xdpi_fifo = &sq->db.xdpi_fifo;
-
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
*/
@@ -311,7 +335,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
do {
struct mlx5e_xdp_wqe_info *wi;
- u16 ci, j;
+ u16 ci;
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
@@ -319,19 +343,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
sqcc += wi->num_wqebbs;
- for (j = 0; j < wi->num_ds; j++) {
- struct mlx5e_xdp_info xdpi =
- mlx5e_xdpi_fifo_pop(xdpi_fifo);
-
- if (is_redirect) {
- dma_unmap_single(sq->pdev, xdpi.dma_addr,
- xdpi.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame(xdpi.xdpf);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi.di, true);
- }
- }
+ mlx5e_free_xdpsq_desc(sq, wi, rq, true);
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -348,31 +360,16 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
- struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
- bool is_redirect = !rq;
-
while (sq->cc != sq->pc) {
struct mlx5e_xdp_wqe_info *wi;
- u16 ci, i;
+ u16 ci;
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
wi = &sq->db.wqe_info[ci];
sq->cc += wi->num_wqebbs;
- for (i = 0; i < wi->num_ds; i++) {
- struct mlx5e_xdp_info xdpi =
- mlx5e_xdpi_fifo_pop(xdpi_fifo);
-
- if (is_redirect) {
- dma_unmap_single(sq->pdev, xdpi.dma_addr,
- xdpi.xdpf->len, DMA_TO_DEVICE);
- xdp_return_frame(xdpi.xdpf);
- } else {
- /* Recycle RX page */
- mlx5e_page_release(rq, &xdpi.di, false);
- }
- }
+ mlx5e_free_xdpsq_desc(sq, wi, rq, false);
}
}
@@ -439,9 +436,9 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
mlx5e_xmit_xdp_doorbell(xdpsq);
- if (xdpsq->redirect_flush) {
+ if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
xdp_do_flush_map();
- xdpsq->redirect_flush = false;
+ __clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
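The xdp.c hunks above retire the per-SQ redirect_flush bool in favour of a second bit in the per-RQ flags bitmap, and the en.h change earlier turns the enum values into plain bit indices, which is what the set_bit()/test_bit() family expects. A reduced sketch of that pattern with hypothetical names:

#include <linux/bitops.h>
#include <linux/types.h>

/* Bit indices, not BIT() masks: __set_bit()/test_bit() do the shifting
 * themselves.
 */
enum example_rq_flag {
        EXAMPLE_RQ_FLAG_XDP_XMIT,
        EXAMPLE_RQ_FLAG_XDP_REDIRECT,
};

struct example_rq {
        DECLARE_BITMAP(flags, 2);
};

static void example_mark_redirect(struct example_rq *rq)
{
        __set_bit(EXAMPLE_RQ_FLAG_XDP_REDIRECT, rq->flags);
}

static bool example_flush_needed(struct example_rq *rq)
{
        return test_bit(EXAMPLE_RQ_FLAG_XDP_REDIRECT, rq->flags);
}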
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 553956cadc8a..8b537a4b0840 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -74,16 +74,68 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
}
}
+/* Enable inline WQEs to shift some load from a congested HCA (HW) to
+ * a less congested cpu (SW).
+ */
+static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
+{
+ u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
+ struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+
+#define MLX5E_XDP_INLINE_WATERMARK_LOW 10
+#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128
+
+ if (session->inline_on) {
+ if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+ session->inline_on = 0;
+ return;
+ }
+
+ /* inline is false */
+ if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+ session->inline_on = 1;
+}
+
static inline void
-mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len)
+mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi,
+ struct mlx5e_xdpsq_stats *stats)
{
struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ dma_addr_t dma_addr = xdpi->dma_addr;
+ struct xdp_frame *xdpf = xdpi->xdpf;
struct mlx5_wqe_data_seg *dseg =
- (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++;
+ (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+ u16 dma_len = xdpf->len;
+ session->pkt_count++;
+
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
+
+ if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
+ struct mlx5_wqe_inline_seg *inline_dseg =
+ (struct mlx5_wqe_inline_seg *)dseg;
+ u16 ds_len = sizeof(*inline_dseg) + dma_len;
+ u16 ds_cnt = DIV_ROUND_UP(ds_len, MLX5_SEND_WQE_DS);
+
+ if (unlikely(session->ds_count + ds_cnt > session->max_ds_count)) {
+ /* Not enough space for inline wqe, send with memory pointer */
+ session->complete = true;
+ goto no_inline;
+ }
+
+ inline_dseg->byte_count = cpu_to_be32(dma_len | MLX5_INLINE_SEG);
+ memcpy(inline_dseg->data, xdpf->data, dma_len);
+
+ session->ds_count += ds_cnt;
+ stats->inlnw++;
+ return;
+ }
+
+no_inline:
dseg->addr = cpu_to_be64(dma_addr);
dseg->byte_count = cpu_to_be32(dma_len);
dseg->lkey = sq->mkey_be;
+ session->ds_count++;
}
static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq,
@@ -110,5 +162,4 @@ mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
-
#endif
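mlx5e_xdp_update_inline_state() above is a two-watermark hysteresis: inlining turns on only once the number of outstanding XDP descriptors (the wraparound-safe u16 difference xdpi_fifo_pc - xdpi_fifo_cc) reaches the high watermark, and turns off only after it drains down to the low one, so the decision cannot flap around a single threshold. A standalone sketch of just that state machine; the thresholds mirror the diff, the function itself is illustrative:

#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_INLINE_WATERMARK_LOW    10
#define EXAMPLE_INLINE_WATERMARK_HIGH   128

/* Return the new inline_on state given the current one and the number of
 * descriptors still in flight.
 */
static bool example_update_inline(bool inline_on, uint16_t outstanding)
{
        if (inline_on)
                return outstanding > EXAMPLE_INLINE_WATERMARK_LOW;

        return outstanding >= EXAMPLE_INLINE_WATERMARK_HIGH;
}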
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1dd225380a66..6da7c88742dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -40,6 +40,57 @@
#include "en_accel/tls_rxtx.h"
#include "en.h"
+#if IS_ENABLED(CONFIG_GENEVE)
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+ return mlx5_tx_swp_supported(mdev);
+}
+
+static inline void
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_swp_spec swp_spec = {};
+ unsigned int offset = 0;
+ __be16 l3_proto;
+ u8 l4_proto;
+
+ l3_proto = vlan_get_protocol(skb);
+ switch (l3_proto) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+ break;
+ default:
+ return;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
+ return;
+ swp_spec.l3_proto = l3_proto;
+ swp_spec.l4_proto = l4_proto;
+ swp_spec.is_tun = true;
+ if (inner_ip_hdr(skb)->version == 6) {
+ swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+ swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ swp_spec.tun_l3_proto = htons(ETH_P_IP);
+ swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
+ }
+
+ mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+}
+
+#else
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+#endif /* CONFIG_GENEVE */
+
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 53608afd39b6..0dd17514caae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u8 mode,
struct xfrm_offload *xo)
{
- u8 proto;
+ struct mlx5e_swp_spec swp_spec = {};
/* Tunnel Mode:
* SWP: OutL3 InL3 InL4
@@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* SWP: OutL3 InL4
* InL3
* Pkt: MAC IP ESP L4
- *
- * Offsets are in 2-byte words, counting from start of frame
*/
- eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
-
- if (mode == XFRM_MODE_TUNNEL) {
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ swp_spec.l3_proto = skb->protocol;
+ swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
+ if (swp_spec.is_tun) {
if (xo->proto == IPPROTO_IPV6) {
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- proto = inner_ipv6_hdr(skb)->nexthdr;
+ swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+ swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
} else {
- proto = inner_ip_hdr(skb)->protocol;
+ swp_spec.tun_l3_proto = htons(ETH_P_IP);
+ swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
}
} else {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- proto = xo->proto;
- }
- switch (proto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- /* Fall through */
- case IPPROTO_TCP:
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- break;
+ swp_spec.tun_l3_proto = skb->protocol;
+ swp_spec.tun_l4_proto = xo->proto;
}
+
+ mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index be137d4a9169..439bf5953885 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
*/
nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->xmit_more = 1;
nskb->queue_mapping = skb->queue_mapping;
}
@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+ mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
mlx5e_sq_fetch_wqe(sq, wqe, pi);
return skb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 78dc8fe2a83c..7efaa58ae034 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1561,7 +1561,7 @@ static int mlx5e_get_module_info(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
int size_read = 0;
- u8 data[4];
+ u8 data[4] = {0};
size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
if (size_read < 2)
@@ -1571,17 +1571,17 @@ static int mlx5e_get_module_info(struct net_device *netdev,
switch (data[0]) {
case MLX5_MODULE_ID_QSFP:
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
break;
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
/* data[1] = revision id */
if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
modinfo->type = ETH_MODULE_SFF_8636;
- modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
} else {
modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
}
break;
case MLX5_MODULE_ID_SFP:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 46157e2a1e5a..457cc39423f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
@@ -43,6 +44,7 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
@@ -53,6 +55,7 @@
#include "lib/eq.h"
#include "en/monitor_stats.h"
#include "en/reporter.h"
+#include "en/params.h"
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -101,108 +104,9 @@ bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
return true;
}
-static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params)
-{
- u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- u16 linear_rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- u32 frag_sz;
-
- linear_rq_headroom += NET_IP_ALIGN;
-
- frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu);
-
- if (params->xdp_prog && frag_sz < PAGE_SIZE)
- frag_sz = PAGE_SIZE;
-
- return frag_sz;
-}
-
-static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params)
-{
- u32 linear_frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-
- return MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(linear_frag_sz);
-}
-
-static bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
-
- return !params->lro_en && frag_sz <= PAGE_SIZE;
-}
-
-#define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \
- MLX5_MPWQE_LOG_STRIDE_SZ_BASE)
-static bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u32 frag_sz = mlx5e_rx_get_linear_frag_sz(params);
- s8 signed_log_num_strides_param;
- u8 log_num_strides;
-
- if (!mlx5e_rx_is_linear_skb(mdev, params))
- return false;
-
- if (order_base_2(frag_sz) > MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ)
- return false;
-
- if (MLX5_CAP_GEN(mdev, ext_stride_num_range))
- return true;
-
- log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ - order_base_2(frag_sz);
- signed_log_num_strides_param =
- (s8)log_num_strides - MLX5_MPWQE_LOG_NUM_STRIDES_BASE;
-
- return signed_log_num_strides_param >= 0;
-}
-
-static u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params)
-{
- if (params->log_rq_mtu_frames <
- mlx5e_mpwqe_log_pkts_per_wqe(params) + MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW)
- return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
-
- return params->log_rq_mtu_frames - mlx5e_mpwqe_log_pkts_per_wqe(params);
-}
-
-static u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, params))
- return order_base_2(mlx5e_rx_get_linear_frag_sz(params));
-
- return MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
-}
-
-static u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- return MLX5_MPWRQ_LOG_WQE_SZ -
- mlx5e_mpwqe_get_log_stride_size(mdev, params);
-}
-
-static u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
- struct mlx5e_params *params)
-{
- u16 linear_rq_headroom = params->xdp_prog ?
- XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
- bool is_linear_skb;
-
- linear_rq_headroom += NET_IP_ALIGN;
-
- is_linear_skb = (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) ?
- mlx5e_rx_is_linear_skb(mdev, params) :
- mlx5e_rx_mpwqe_is_linear_skb(mdev, params);
-
- return is_linear_skb ? linear_rq_headroom : 0;
-}
-
void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
- params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
params->log_rq_mtu_frames = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
@@ -469,7 +373,6 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
}
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
- struct mlx5e_params *params,
int wq_sz, int cpu)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
@@ -597,7 +500,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_free;
}
- err = mlx5e_init_di_list(rq, params, wq_sz, c->cpu);
+ err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
if (err)
goto err_free;
rq->post_wqes = mlx5e_post_rx_wqes;
@@ -615,7 +518,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_free;
}
- rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(mdev, params) ?
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->mkey_be = c->mkey_be;
@@ -902,10 +805,14 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+ u16 head = wq->head;
+ int i;
- /* UMR WQE (if in progress) is always at wq->head */
- if (rq->mpwqe.umr_in_progress)
- rq->dealloc_wqe(rq, wq->head);
+ /* Outstanding UMR WQEs (in progress) start at wq->head */
+ for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
+ rq->dealloc_wqe(rq, head);
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+ }
while (!mlx5_wq_ll_is_empty(wq)) {
struct mlx5e_rx_wqe_ll *wqe;
@@ -970,16 +877,8 @@ err_free_rq:
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
- struct mlx5e_icosq *sq = &rq->channel->icosq;
- struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_tx_wqe *nopwqe;
-
- u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
-
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
- nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+ mlx5e_trigger_irq(&rq->channel->icosq);
}
static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -1091,7 +990,7 @@ static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
- u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.ico_wqe)),
@@ -1527,7 +1426,7 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
dseg->lkey = sq->mkey_be;
wi->num_wqebbs = 1;
- wi->num_ds = 1;
+ wi->num_pkts = 1;
}
}
@@ -1895,7 +1794,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
- c->pdev = &priv->mdev->pdev->dev;
+ c->pdev = priv->mdev->device;
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
@@ -2053,7 +1952,7 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
- if (mlx5e_rx_is_linear_skb(mdev, params)) {
+ if (mlx5e_rx_is_linear_skb(params)) {
int frag_stride;
frag_stride = mlx5e_rx_get_linear_frag_sz(params);
@@ -2107,6 +2006,13 @@ static inline u8 mlx5e_get_rqwq_log_stride(u8 wq_type, int ndsegs)
return order_base_2(sz);
}
+static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
+{
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ return MLX5_GET(wq, wq, log_wq_sz);
+}
+
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_rq_param *param)
@@ -2141,7 +2047,7 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
@@ -2156,7 +2062,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
- param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(mdev->device);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2168,7 +2074,7 @@ static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@@ -2177,10 +2083,13 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+ allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
+ !!MLX5_IPSEC_DEV(priv->mdev);
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -2270,13 +2179,28 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE);
}
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp)
+{
+ switch (params->rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return order_base_2(MLX5E_UMR_WQEBBS) +
+ mlx5e_get_rq_log_wq_sz(rqp->rqc);
+ default: /* MLX5_WQ_TYPE_CYCLIC */
+ return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ }
+}
+
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
{
- u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ u8 icosq_log_wq_sz;
mlx5e_build_rq_param(priv, params, &cparam->rq);
+
+ icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+
mlx5e_build_sq_param(priv, params, &cparam->sq);
mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
@@ -2332,14 +2256,18 @@ static void mlx5e_activate_channels(struct mlx5e_channels *chs)
mlx5e_activate_channel(chs->c[i]);
}
+#define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
+
static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
int err = 0;
int i;
- for (i = 0; i < chs->num; i++)
- err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
- err ? 0 : 20000);
+ for (i = 0; i < chs->num; i++) {
+ int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT;
+
+ err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout);
+ }
return err ? -ETIMEDOUT : 0;
}
@@ -2636,7 +2564,7 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
@@ -2746,22 +2674,6 @@ free_in:
return err;
}
-static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
-{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
-
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
-
- mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
- &tirc_default_config[tt], tirc, true);
-}
-
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
{
@@ -2811,6 +2723,21 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
return 0;
}
+void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv)
+{
+ struct mlx5e_params *params = &priv->channels.params;
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 max_mtu;
+
+ /* MTU range: 68 - hw-specific max */
+ netdev->min_mtu = ETH_MIN_MTU;
+
+ mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
+ netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu),
+ ETH_MAX_MTU);
+}
+
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3058,8 +2985,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
- param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
- param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.db_numa_node = dev_to_node(mdev->device);
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@@ -3167,32 +3094,42 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
- enum mlx5e_traffic_types tt,
- u32 *tirc)
+static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv,
+ u32 rqtn, u32 *tirc)
{
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, tunneled_offload_en,
+ priv->channels.params.tunneled_offload_en);
mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
+}
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types tt,
+ u32 *tirc)
+{
+ mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
&tirc_default_config[tt], tirc, false);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
- MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
- mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
-
- MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
- MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
+static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
+ enum mlx5e_traffic_types tt,
+ u32 *tirc)
+{
+ mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc);
+ mlx5e_build_indir_tir_ctx_hash(&priv->rss_params,
+ &tirc_default_config[tt], tirc, true);
+}
+
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
struct mlx5e_tir *tir;
@@ -3775,7 +3712,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
new_channels.params.sw_mtu = new_mtu;
if (params->xdp_prog &&
- !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ !mlx5e_rx_is_linear_skb(&new_channels.params)) {
netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n",
new_mtu, mlx5e_xdp_max_mtu(params));
err = -EINVAL;
@@ -4115,6 +4052,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
+
+#if IS_ENABLED(CONFIG_GENEVE)
+ /* Support Geneve offload for default UDP port */
+ if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
+ return features;
+#endif
}
out:
@@ -4210,7 +4153,7 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
new_channels.params = priv->channels.params;
new_channels.params.xdp_prog = prog;
- if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) {
+ if (!mlx5e_rx_is_linear_skb(&new_channels.params)) {
netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n",
new_channels.params.sw_mtu,
mlx5e_xdp_max_mtu(&new_channels.params));
@@ -4264,7 +4207,7 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
mlx5e_set_rq_type(priv->mdev, &priv->channels.params);
if (was_opened && reset)
- mlx5e_open_locked(netdev);
+ err = mlx5e_open_locked(netdev);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
@@ -4554,7 +4497,7 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
if (!slow_pci_heuristic(mdev) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
- !mlx5e_rx_is_linear_skb(mdev, params)))
+ !mlx5e_rx_is_linear_skb(params)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
@@ -4630,6 +4573,8 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
/* RSS */
mlx5e_build_rss_params(rss_params, params->num_channels);
+ params->tunneled_offload_en =
+ mlx5e_tunnel_inner_ft_supported(mdev);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4651,7 +4596,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
bool fcs_supported;
bool fcs_enabled;
- SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+ SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
@@ -4686,7 +4631,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4694,7 +4640,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5_vxlan_allowed(mdev->vxlan)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
@@ -4913,7 +4859,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_mtu;
mlx5e_init_l2_addr(priv);
@@ -4921,10 +4866,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (!netif_running(netdev))
mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
- /* MTU range: 68 - hw-specific max */
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
mlx5_lag_add(mdev, netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index a66b6ed80b30..91e24f1cead8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -795,7 +795,8 @@ static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
- if (!mlx5e_tc_tun_device_to_offload(priv, netdev))
+ if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
+ !is_vlan_dev(netdev))
return NOTIFY_OK;
switch (event) {
@@ -1374,6 +1375,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->num_tc = 1;
+ params->tunneled_offload_en = false;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@@ -1389,7 +1391,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
if (rep->vport == MLX5_VPORT_UPLINK) {
- SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev);
+ SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr);
@@ -1623,13 +1625,7 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
static void mlx5e_vf_rep_enable(struct mlx5e_priv *priv)
{
- struct net_device *netdev = priv->netdev;
- struct mlx5_core_dev *mdev = priv->mdev;
- u16 max_mtu;
-
- netdev->min_mtu = ETH_MIN_MTU;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
+ mlx5e_set_netdev_mtu_boundaries(priv);
}
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index c3b3002ff62f..13133e7f088e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -409,14 +409,15 @@ mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle
mlx5e_page_release(rq, &dma_info[i], recycle);
}
-static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
{
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
- struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
- rq->mpwqe.umr_in_progress = false;
+ do {
+ u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
- mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ mlx5_wq_ll_push(wq, next_wqe_index);
+ } while (--n);
/* ensure wqes are visible to device before updating doorbell record */
dma_wmb();
@@ -426,7 +427,7 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
{
- return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ return mlx5_wq_cyc_get_ctr_wrap_cnt(&sq->wq, sq->pc);
}
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
@@ -478,8 +479,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE);
wi->consumed_strides = 0;
- rq->mpwqe.umr_in_progress = true;
-
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
@@ -487,7 +486,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq->pc += MLX5E_UMR_WQEBBS;
- mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl);
+
+ sq->doorbell_cseg = &umr_wqe->ctrl;
return 0;
@@ -542,37 +542,13 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}
-static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq,
- struct mlx5e_icosq *sq,
- struct mlx5e_rq *rq,
- struct mlx5_cqe64 *cqe)
-{
- struct mlx5_wq_cyc *wq = &sq->wq;
- u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
- struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
-
- mlx5_cqwq_pop(&cq->wq);
-
- if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
- netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
- return;
- }
-
- if (likely(icowi->opcode == MLX5_OPCODE_UMR)) {
- mlx5e_post_rx_mpwqe(rq);
- return;
- }
-
- if (unlikely(icowi->opcode != MLX5_OPCODE_NOP))
- netdev_WARN_ONCE(cq->channel->netdev,
- "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode);
-}
-
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
+ u8 completed_umr = 0;
+ u16 sqcc;
+ int i;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
return;
@@ -581,28 +557,96 @@ static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
if (likely(!cqe))
return;
- /* by design, there's only a single cqe */
- mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe);
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ i = 0;
+ do {
+ u16 wqe_counter;
+ bool last_wqe;
+
+ mlx5_cqwq_pop(&cq->wq);
+
+ wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
+ break;
+ }
+ do {
+ struct mlx5e_sq_wqe_info *wi;
+ u16 ci;
+
+ last_wqe = (sqcc == wqe_counter);
+
+ ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
+ wi = &sq->db.ico_wqe[ci];
+
+ if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
+ sqcc += MLX5E_UMR_WQEBBS;
+ completed_umr++;
+ } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
+ sqcc++;
+ } else {
+ netdev_WARN_ONCE(cq->channel->netdev,
+ "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ wi->opcode);
+ }
+
+ } while (!last_wqe);
+
+ } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
+
+ sq->cc = sqcc;
mlx5_cqwq_update_db_record(&cq->wq);
+
+ if (likely(completed_umr)) {
+ mlx5e_post_rx_mpwqe(rq, completed_umr);
+ rq->mpwqe.umr_in_progress -= completed_umr;
+ }
}
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
{
+ struct mlx5e_icosq *sq = &rq->channel->icosq;
struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
+ u8 missing, i;
+ u16 head;
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return false;
- mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq);
+ mlx5e_poll_ico_cq(&sq->cq, rq);
+
+ missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
- if (mlx5_wq_ll_is_full(wq))
+ if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
+ rq->stats->congst_umr++;
+
+#define UMR_WQE_BULK (2)
+ if (likely(missing < UMR_WQE_BULK))
return false;
- if (!rq->mpwqe.umr_in_progress)
- mlx5e_alloc_rx_mpwqe(rq, wq->head);
- else
- rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2;
+ head = rq->mpwqe.actual_wq_head;
+ i = missing;
+ do {
+ if (unlikely(mlx5e_alloc_rx_mpwqe(rq, head)))
+ break;
+ head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
+ } while (--i);
+
+ rq->mpwqe.umr_last_bulk = missing - i;
+ if (sq->doorbell_cseg) {
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
+ sq->doorbell_cseg = NULL;
+ }
+
+ rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
+ rq->mpwqe.actual_wq_head = head;
return false;
}
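
The en_rx.c hunks above move the RQ from posting one UMR WQE at a time to bulk replenishment: mlx5e_poll_ico_cq() counts completed UMRs, mlx5e_post_rx_mpwqes() refills up to the number of missing WQEs, and the ICOSQ doorbell is written once per batch via sq->doorbell_cseg. A minimal standalone sketch of that batching pattern follows; the ring layout, the fill_one() helper and the constants are illustrative stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE     64
#define UMR_WQE_BULK  2            /* same bulk threshold value as the patch */

struct ring {
	unsigned int head;         /* producer position                        */
	unsigned int posted;       /* descriptors handed to the simulated HW   */
	bool         doorbell_armed;
};

/* Pretend to build one descriptor; a real driver could fail here. */
static bool fill_one(struct ring *r)
{
	r->head = (r->head + 1) % RING_SIZE;
	r->doorbell_armed = true;
	return true;
}

/* Replenish everything that is missing, then ring the doorbell once. */
static void refill(struct ring *r, unsigned int in_progress)
{
	unsigned int missing = RING_SIZE - r->posted - in_progress;
	unsigned int done = 0;

	if (missing < UMR_WQE_BULK)   /* too few to be worth a doorbell */
		return;

	while (done < missing && fill_one(r))
		done++;

	if (r->doorbell_armed) {      /* one "doorbell" (here a print) per batch */
		printf("doorbell: %u new descriptors\n", done);
		r->doorbell_armed = false;
	}
	r->posted += done;
}

int main(void)
{
	struct ring r = { 0 };

	refill(&r, 0);                /* a single doorbell covers all fills */
	return 0;
}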
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index b75aa8b8bf04..483d321d2151 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -65,6 +65,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_mpwqe) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_inlnw) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) },
@@ -79,6 +81,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_mpwqe) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_inlnw) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) },
@@ -89,7 +93,6 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
@@ -160,6 +163,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_xdp_drop += rq_stats->xdp_drop;
s->rx_xdp_redirect += rq_stats->xdp_redirect;
s->rx_xdp_tx_xmit += xdpsq_stats->xmit;
+ s->rx_xdp_tx_mpwqe += xdpsq_stats->mpwqe;
+ s->rx_xdp_tx_inlnw += xdpsq_stats->inlnw;
s->rx_xdp_tx_full += xdpsq_stats->full;
s->rx_xdp_tx_err += xdpsq_stats->err;
s->rx_xdp_tx_cqe += xdpsq_stats->cqes;
@@ -170,7 +175,6 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
- s->rx_page_reuse += rq_stats->page_reuse;
s->rx_cache_reuse += rq_stats->cache_reuse;
s->rx_cache_full += rq_stats->cache_full;
s->rx_cache_empty += rq_stats->cache_empty;
@@ -185,6 +189,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->ch_eq_rearm += ch_stats->eq_rearm;
/* xdp redirect */
s->tx_xdp_xmit += xdpsq_red_stats->xmit;
+ s->tx_xdp_mpwqe += xdpsq_red_stats->mpwqe;
+ s->tx_xdp_inlnw += xdpsq_red_stats->inlnw;
s->tx_xdp_full += xdpsq_red_stats->full;
s->tx_xdp_err += xdpsq_red_stats->err;
s->tx_xdp_cqes += xdpsq_red_stats->cqes;
@@ -1212,7 +1218,6 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
@@ -1245,6 +1250,8 @@ static const struct counter_desc sq_stats_desc[] = {
static const struct counter_desc rq_xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+ { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
@@ -1252,6 +1259,8 @@ static const struct counter_desc rq_xdpsq_stats_desc[] = {
static const struct counter_desc xdpsq_stats_desc[] = {
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, mpwqe) },
+ { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, inlnw) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) },
{ MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) },
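
The stats hunks add per-queue mpwqe and inlnw counters and fold them into the software totals in mlx5e_grp_sw_update_stats(). The aggregation itself is a plain sum over queues; a small self-contained sketch of that fold, with illustrative structure and field names, could look like this:

#include <stdint.h>
#include <stdio.h>

/* Per-queue XDP-SQ counters, mirroring the fields the patch adds
 * (mpwqe, inlnw) next to the existing xmit counter.
 */
struct xdpsq_counters {
	uint64_t xmit;
	uint64_t mpwqe;
	uint64_t inlnw;
};

/* Fold every queue's counters into one software total, the same way
 * mlx5e_grp_sw_update_stats() walks the channels.
 */
static struct xdpsq_counters sum_queues(const struct xdpsq_counters *q, int n)
{
	struct xdpsq_counters total = {0};

	for (int i = 0; i < n; i++) {
		total.xmit  += q[i].xmit;
		total.mpwqe += q[i].mpwqe;
		total.inlnw += q[i].inlnw;
	}
	return total;
}

int main(void)
{
	struct xdpsq_counters per_queue[2] = { {10, 2, 6}, {5, 1, 3} };
	struct xdpsq_counters total = sum_queues(per_queue, 2);

	printf("xmit %llu mpwqe %llu inlnw %llu\n",
	       (unsigned long long)total.xmit,
	       (unsigned long long)total.mpwqe,
	       (unsigned long long)total.inlnw);
	return 0;
}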
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 16c3b785f282..cdddcc46971b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -77,6 +77,8 @@ struct mlx5e_sw_stats {
u64 rx_xdp_drop;
u64 rx_xdp_redirect;
u64 rx_xdp_tx_xmit;
+ u64 rx_xdp_tx_mpwqe;
+ u64 rx_xdp_tx_inlnw;
u64 rx_xdp_tx_full;
u64 rx_xdp_tx_err;
u64 rx_xdp_tx_cqe;
@@ -91,6 +93,8 @@ struct mlx5e_sw_stats {
u64 tx_queue_wake;
u64 tx_cqe_err;
u64 tx_xdp_xmit;
+ u64 tx_xdp_mpwqe;
+ u64 tx_xdp_inlnw;
u64 tx_xdp_full;
u64 tx_xdp_err;
u64 tx_xdp_cqes;
@@ -101,7 +105,6 @@ struct mlx5e_sw_stats {
u64 rx_buff_alloc_err;
u64 rx_cqe_compress_blks;
u64 rx_cqe_compress_pkts;
- u64 rx_page_reuse;
u64 rx_cache_reuse;
u64 rx_cache_full;
u64 rx_cache_empty;
@@ -201,7 +204,6 @@ struct mlx5e_rq_stats {
u64 buff_alloc_err;
u64 cqe_compress_blks;
u64 cqe_compress_pkts;
- u64 page_reuse;
u64 cache_reuse;
u64 cache_full;
u64 cache_empty;
@@ -241,6 +243,8 @@ struct mlx5e_sq_stats {
struct mlx5e_xdpsq_stats {
u64 xmit;
+ u64 mpwqe;
+ u64 inlnw;
u64 full;
u64 err;
/* dirtied @completion */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d75dc44eb2ff..122f457091a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,6 +44,7 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
+#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -663,7 +664,8 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
- hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
+ hp->tirn, hp->pair->rqn[0],
+ dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
hpe->hp = hp;
@@ -700,7 +702,7 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
- hpe->hp->pair->peer_mdev->priv.name);
+ dev_name(hpe->hp->pair->peer_mdev->device));
mlx5e_hairpin_destroy(hpe->hp);
hash_del(&hpe->hairpin_hlist);
@@ -1437,6 +1439,26 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return 0;
}
+static void *get_match_headers_criteria(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ inner_headers) :
+ MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+}
+
+static void *get_match_headers_value(u32 flags,
+ struct mlx5_flow_spec *spec)
+{
+ return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
+ MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ inner_headers) :
+ MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+}
+
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
@@ -1502,10 +1524,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
/* In decap flow, header pointers should point to the inner
* headers; the outer headers were already set by parse_tunnel_attr
*/
- headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- inner_headers);
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- inner_headers);
+ headers_c = get_match_headers_criteria(MLX5_FLOW_CONTEXT_ACTION_DECAP,
+ spec);
+ headers_v = get_match_headers_value(MLX5_FLOW_CONTEXT_ACTION_DECAP,
+ spec);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -1520,11 +1542,23 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
-
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+ is_vlan_dev(filter_dev)) {
+ struct flow_dissector_key_vlan filter_dev_mask;
+ struct flow_dissector_key_vlan filter_dev_key;
struct flow_match_vlan match;
- flow_rule_match_vlan(rule, &match);
+ if (is_vlan_dev(filter_dev)) {
+ match.key = &filter_dev_key;
+ match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+ match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
+ match.key->vlan_priority = 0;
+ match.mask = &filter_dev_mask;
+ memset(match.mask, 0xff, sizeof(*match.mask));
+ match.mask->vlan_priority = 0;
+ } else {
+ flow_rule_match_vlan(rule, &match);
+ }
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
@@ -1827,6 +1861,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct pedit_headers {
struct ethhdr eth;
+ struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
@@ -1873,38 +1908,73 @@ struct mlx5_fields {
u8 field;
u8 size;
u32 offset;
+ u32 match_offset;
};
-#define OFFLOAD(fw_field, size, field, off) \
- {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
+#define OFFLOAD(fw_field, size, field, off, match_field) \
+ {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, \
+ offsetof(struct pedit_headers, field) + (off), \
+ MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
+
+static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
+ void *matchmaskp, int size)
+{
+ bool same = false;
+
+ switch (size) {
+ case sizeof(u8):
+ same = ((*(u8 *)valp) & (*(u8 *)maskp)) ==
+ ((*(u8 *)matchvalp) & (*(u8 *)matchmaskp));
+ break;
+ case sizeof(u16):
+ same = ((*(u16 *)valp) & (*(u16 *)maskp)) ==
+ ((*(u16 *)matchvalp) & (*(u16 *)matchmaskp));
+ break;
+ case sizeof(u32):
+ same = ((*(u32 *)valp) & (*(u32 *)maskp)) ==
+ ((*(u32 *)matchvalp) & (*(u32 *)matchmaskp));
+ break;
+ }
+
+ return same;
+}
static struct mlx5_fields fields[] = {
- OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
- OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
- OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
- OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
- OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
-
- OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
- OFFLOAD(SIPV4, 4, ip4.saddr, 0),
- OFFLOAD(DIPV4, 4, ip4.daddr, 0),
-
- OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
- OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
- OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
- OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
- OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
- OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
- OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
- OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
- OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
-
- OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
- OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
- OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
-
- OFFLOAD(UDP_SPORT, 2, udp.source, 0),
- OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
+ OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0, dmac_47_16),
+ OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0, dmac_15_0),
+ OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0, smac_47_16),
+ OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0, smac_15_0),
+ OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0, ethertype),
+ OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0, first_vid),
+
+ OFFLOAD(IP_TTL, 1, ip4.ttl, 0, ttl_hoplimit),
+ OFFLOAD(SIPV4, 4, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ OFFLOAD(DIPV4, 4, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+
+ OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
+ OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
+ OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
+ OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
+ OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
+ OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
+ OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
+ OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0, ttl_hoplimit),
+
+ OFFLOAD(TCP_SPORT, 2, tcp.source, 0, tcp_sport),
+ OFFLOAD(TCP_DPORT, 2, tcp.dest, 0, tcp_dport),
+ OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5, tcp_flags),
+
+ OFFLOAD(UDP_SPORT, 2, udp.source, 0, udp_sport),
+ OFFLOAD(UDP_DPORT, 2, udp.dest, 0, udp_dport),
};
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
@@ -1913,9 +1983,14 @@ static struct mlx5_fields fields[] = {
*/
static int offload_pedit_fields(struct pedit_headers_action *hdrs,
struct mlx5e_tc_flow_parse_attr *parse_attr,
+ u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
+ void *headers_c = get_match_headers_criteria(*action_flags,
+ &parse_attr->spec);
+ void *headers_v = get_match_headers_value(*action_flags,
+ &parse_attr->spec);
int i, action_size, nactions, max_actions, first, last, next_z;
void *s_masks_p, *a_masks_p, *vals_p;
struct mlx5_fields *f;
@@ -1939,6 +2014,8 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
+ bool skip;
+
f = &fields[i];
/* avoid seeing bits set from previous iterations */
s_mask = 0;
@@ -1967,19 +2044,34 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return -EOPNOTSUPP;
}
+ skip = false;
if (s_mask) {
+ void *match_mask = headers_c + f->match_offset;
+ void *match_val = headers_v + f->match_offset;
+
cmd = MLX5_ACTION_TYPE_SET;
mask = s_mask;
vals_p = (void *)set_vals + f->offset;
+ /* don't rewrite if we have a match on the same value */
+ if (cmp_val_mask(vals_p, s_masks_p, match_val,
+ match_mask, f->size))
+ skip = true;
/* clear to denote we consumed this field */
memset(s_masks_p, 0, f->size);
} else {
+ u32 zero = 0;
+
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
+ /* add 0 is no change */
+ if (!memcmp(vals_p, &zero, f->size))
+ skip = true;
/* clear to denote we consumed this field */
memset(a_masks_p, 0, f->size);
}
+ if (skip)
+ continue;
field_bsize = f->size * BITS_PER_BYTE;
@@ -2026,6 +2118,15 @@ static int offload_pedit_fields(struct pedit_headers_action *hdrs,
return 0;
}
+static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
+ int namespace)
+{
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
+ else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
+ return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
+}
+
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
struct pedit_headers_action *hdrs,
int namespace,
@@ -2037,11 +2138,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
- max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
- else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
- max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
-
+ max_actions = mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace);
/* can get up to a crazy 16 HW actions in a 32-bit pedit SW key */
max_actions = min(max_actions, nkeys * 16);
@@ -2074,6 +2171,12 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
goto out_err;
}
+ if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "The pedit offload action is not supported");
+ goto out_err;
+ }
+
mask = act->mangle.mask;
val = act->mangle.val;
offset = act->mangle.offset;
@@ -2092,6 +2195,7 @@ out_err:
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct pedit_headers_action *hdrs,
+ u32 *action_flags,
struct netlink_ext_ack *extack)
{
struct pedit_headers *cmd_masks;
@@ -2104,7 +2208,7 @@ static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
goto out_err;
}
- err = offload_pedit_fields(hdrs, parse_attr, extack);
+ err = offload_pedit_fields(hdrs, parse_attr, action_flags, extack);
if (err < 0)
goto out_dealloc_parsed_actions;
@@ -2216,11 +2320,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
u8 ip_proto;
int i;
- if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
- else
- headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
-
+ headers_v = get_match_headers_value(actions, spec);
ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
/* for non-IP we only re-write MACs, so we're okay */
@@ -2266,7 +2366,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
actions = flow->nic_attr->action;
if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
- !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ !((actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) ||
+ (actions & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)))
return false;
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -2291,6 +2392,74 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
+static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ u16 mask16 = VLAN_VID_MASK;
+ u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ const struct flow_action_entry pedit_act = {
+ .id = FLOW_ACTION_MANGLE,
+ .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ };
+ u8 match_prio_mask, match_prio_val;
+ void *headers_c, *headers_v;
+ int err;
+
+ headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
+ headers_v = get_match_headers_value(*action, &parse_attr->spec);
+
+ if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
+ MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN rewrite action must have VLAN protocol match");
+ return -EOPNOTSUPP;
+ }
+
+ match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
+ match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
+ if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Changing VLAN prio is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
+ hdrs, NULL);
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return err;
+}
+
+static int
+add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry prio_tag_act = {
+ .vlan.vid = 0,
+ .vlan.prio =
+ MLX5_GET(fte_match_set_lyr_2_4,
+ get_match_headers_value(*action,
+ &parse_attr->spec),
+ first_prio) &
+ MLX5_GET(fte_match_set_lyr_2_4,
+ get_match_headers_criteria(*action,
+ &parse_attr->spec),
+ first_prio),
+ };
+
+ return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
+ &prio_tag_act, parse_attr, hdrs, action,
+ extack);
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -2326,6 +2495,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ act, parse_attr, hdrs,
+ &action, extack);
+ if (err)
+ return err;
+
+ break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
act->csum_flags,
@@ -2365,16 +2543,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
break;
default:
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ return -EOPNOTSUPP;
}
}
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, extack);
+ parse_attr, hdrs, &action, extack);
if (err)
return err;
+ /* in case all pedit actions are skipped, remove the MOD_HDR
+ * flag.
+ */
+ if (parse_attr->num_mod_hdr_actions == 0)
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
attr->action = action;
@@ -2544,8 +2728,7 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
}
break;
default:
- /* action is FLOW_ACT_VLAN_MANGLE */
- return -EOPNOTSUPP;
+ return -EINVAL;
}
attr->total_vlan = vlan_idx + 1;
@@ -2553,15 +2736,60 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
return 0;
}
+static int add_vlan_push_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ struct net_device **out_dev,
+ u32 *action)
+{
+ struct net_device *vlan_dev = *out_dev;
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_PUSH,
+ .vlan.vid = vlan_dev_vlan_id(vlan_dev),
+ .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
+ .vlan.prio = 0,
+ };
+ int err;
+
+ err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ if (err)
+ return err;
+
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
+ dev_get_iflink(vlan_dev));
+ if (is_vlan_dev(*out_dev))
+ err = add_vlan_push_action(priv, attr, out_dev, action);
+
+ return err;
+}
+
+static int add_vlan_pop_action(struct mlx5e_priv *priv,
+ struct mlx5_esw_flow_attr *attr,
+ u32 *action)
+{
+ int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+ struct flow_action_entry vlan_act = {
+ .id = FLOW_ACTION_VLAN_POP,
+ };
+ int err = 0;
+
+ while (nest_level--) {
+ err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
const struct flow_action_entry *act;
@@ -2633,6 +2861,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
uplink_upper == out_dev)
out_dev = uplink_dev;
+ if (is_vlan_dev(out_dev)) {
+ err = add_vlan_push_action(priv, attr,
+ &out_dev,
+ &action);
+ if (err)
+ return err;
+ }
+ if (is_vlan_dev(parse_attr->filter_dev)) {
+ err = add_vlan_pop_action(priv, attr,
+ &action);
+ if (err)
+ return err;
+ }
+
if (!mlx5e_eswitch_rep(out_dev))
return -EOPNOTSUPP;
@@ -2646,7 +2888,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
out_dev->ifindex;
parse_attr->tun_info[attr->out_count] = *info;
encap = false;
- attr->parse_attr = parse_attr;
attr->dests[attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
attr->out_count++;
@@ -2679,7 +2920,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
- err = parse_tc_vlan_action(priv, act, attr, &action);
+ if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ /* Replace vlan pop+push with vlan modify */
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ act, parse_attr, hdrs,
+ &action, extack);
+ } else {
+ err = parse_tc_vlan_action(priv, act, attr, &action);
+ }
+ if (err)
+ return err;
+
+ attr->split_count = attr->out_count;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ act, parse_attr, hdrs,
+ &action, extack);
if (err)
return err;
@@ -2705,16 +2966,39 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
}
default:
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ return -EOPNOTSUPP;
}
}
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
+ action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+		/* For prio tag mode, replace the vlan pop with a vlan prio
+		 * tag rewrite.
+ */
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
+ &action, extack);
+ if (err)
+ return err;
+ }
+
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, extack);
+ parse_attr, hdrs, &action, extack);
if (err)
return err;
+ /* in case all pedit actions are skipped, remove the MOD_HDR
+ * flag. we might have set split_count either by pedit or
+ * pop/push. if there is no pop/push either, reset it too.
+ */
+ if (parse_attr->num_mod_hdr_actions == 0) {
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+ attr->split_count = 0;
+ }
}
attr->action = action;
@@ -2883,7 +3167,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
+ err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
@@ -3080,6 +3364,7 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
netdev_warn_once(priv->netdev,
"flow cookie %lx already exists, ignoring\n",
f->cookie);
+ err = -EEXIST;
goto out;
}
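
Several of the en_tc.c hunks above (add_vlan_rewrite_action() and the pop+push replacement in parse_tc_fdb_actions()) express a VLAN VID change as a synthetic pedit action on the 16-bit TCI word, masked to VLAN_VID_MASK so the priority bits are untouched. A standalone sketch of that masked-set idea, in plain host-order arithmetic rather than the big-endian header bytes the real action operates on:

#include <stdint.h>
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff   /* low 12 bits of the TCI carry the VID */

/* Rewrite only the VID bits, preserving PCP/DEI, as one masked set. */
static uint16_t tci_set_vid(uint16_t tci, uint16_t new_vid)
{
	return (uint16_t)((tci & ~VLAN_VID_MASK) | (new_vid & VLAN_VID_MASK));
}

int main(void)
{
	uint16_t tci = (uint16_t)((5u << 13) | 100);   /* PCP 5, VID 100 */
	uint16_t rewritten = tci_set_vid(tci, 42);

	printf("old TCI 0x%04x -> new TCI 0x%04x\n",
	       (unsigned)tci, (unsigned)rewritten);
	return 0;
}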
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 25a8f8260c14..7b61126fcec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
@@ -110,11 +111,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
+ int channel_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
@@ -163,7 +163,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
case MLX5_INLINE_MODE_NONE:
return 0;
case MLX5_INLINE_MODE_TCP_UDP:
- hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ hlen = eth_get_headlen(skb->dev, skb->data, skb_headlen(skb));
if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
hlen += VLAN_HLEN;
break;
@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
- struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+ struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+ bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->stopped++;
}
- if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ if (!xmit_more || netif_xmit_stopped(sq->txq))
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi)
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
stats->bytes += num_bytes;
- stats->xmit_more += skb->xmit_more;
+ stats->xmit_more += netdev_xmit_more();
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
@@ -392,6 +393,10 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg = &wqe->eth;
dseg = wqe->data;
+#if IS_ENABLED(CONFIG_GENEVE)
+ if (skb->encapsulation)
+ mlx5e_tx_tunnel_accel(skb, eseg);
+#endif
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
eseg->mss = mss;
@@ -419,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg);
+ num_dma, wi, cseg, xmit_more);
return NETDEV_TX_OK;
@@ -445,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!skb))
return NETDEV_TX_OK;
- return mlx5e_sq_xmit(sq, skb, wqe, pi);
+ return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}
static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
@@ -655,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
stats->bytes += num_bytes;
- stats->xmit_more += skb->xmit_more;
+ stats->xmit_more += netdev_xmit_more();
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
@@ -700,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg);
+ num_dma, wi, cseg, false);
return NETDEV_TX_OK;
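
The en_tx.c hunks stop reading skb->xmit_more and instead thread an explicit xmit_more flag (taken from netdev_xmit_more()) down to mlx5e_txwqe_complete(), which rings the doorbell only when the batch ends or the queue stops. A minimal sketch of that doorbell-coalescing condition, with made-up names standing in for the driver state:

#include <stdbool.h>
#include <stdio.h>

/* Ring the doorbell only when the stack says the batch is over
 * (!xmit_more) or when the queue was just stopped and the pending
 * descriptors must be flushed anyway; the same condition is used by
 * mlx5e_txwqe_complete() in the hunk above.
 */
static void complete_tx(unsigned int pc, bool xmit_more, bool queue_stopped)
{
	if (!xmit_more || queue_stopped)
		printf("doorbell at producer counter %u\n", pc);
	else
		printf("defer doorbell, more packets pending\n");
}

int main(void)
{
	complete_tx(1, true,  false);   /* batching: no doorbell yet */
	complete_tx(2, true,  false);   /* still batching            */
	complete_tx(3, false, false);   /* last skb: one doorbell    */
	return 0;
}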
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index b4af5e19f6ac..f9862bf75491 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -71,6 +71,17 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
net_dim(&rq->dim, dim_sample);
}
+void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_tx_wqe *nopwqe;
+ u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
+ sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+ nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+ mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index bb6e5b5d9681..5aac97847721 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -504,8 +504,7 @@ static u64 gather_async_events_mask(struct mlx5_core_dev *dev)
if (MLX5_VPORT_MANAGER(dev))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
- if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
- MLX5_CAP_GEN(dev, general_notification_event))
+ if (MLX5_CAP_GEN(dev, general_notification_event))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_GENERAL_EVENT);
if (MLX5_CAP_GEN(dev, port_module_event))
@@ -707,7 +706,7 @@ void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm)
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
- mb();
+ wmb();
}
EXPORT_SYMBOL(mlx5_eq_update_ci);
@@ -900,14 +899,12 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
-#ifdef CONFIG_RFS_ACCEL
return dev->priv.eq_table->rmap;
-#else
- return NULL;
-#endif
}
+#endif
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 8a67fd197b79..9ea0ccfe5ef5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -72,25 +72,22 @@ static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
-/* The vport getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
- */
-#define mlx5_esw_for_all_vports(esw, i, vport) \
- for ((i) = MLX5_VPORT_PF; \
- (vport) = &(esw)->vports[i], \
- (i) < (esw)->total_vports; (i)++)
+struct mlx5_vport *__must_check
+mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 idx;
-#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (vport) = &(esw)->vports[i], \
- (i) <= (nvfs); (i)++)
+ if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return ERR_PTR(-EPERM);
-static struct mlx5_vport *mlx5_eswitch_get_vport(struct mlx5_eswitch *esw,
- u16 vport_num)
-{
- u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+ idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);
+
+ if (idx > esw->total_vports - 1) {
+ esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
+ vport_num, idx);
+ return ERR_PTR(-EINVAL);
+ }
- WARN_ON(idx > esw->total_vports - 1);
return &esw->vports[idx];
}
@@ -644,9 +641,8 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
- u16 vport_num, int list_type)
+ struct mlx5_vport *vport, int list_type)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
vport_addr_action vport_addr_add;
vport_addr_action vport_addr_del;
@@ -679,9 +675,8 @@ static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
- u16 vport_num, int list_type)
+ struct mlx5_vport *vport, int list_type)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
u8 (*mac_list)[ETH_ALEN];
struct l2addr_node *node;
@@ -710,12 +705,12 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
if (!vport->enabled)
goto out;
- err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
+ err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
mac_list, &size);
if (err)
goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
- vport_num, is_uc ? "UC" : "MC", size);
+ vport->vport, is_uc ? "UC" : "MC", size);
for (i = 0; i < size; i++) {
if (is_uc && !is_valid_ether_addr(mac_list[i]))
@@ -753,10 +748,10 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
if (!addr) {
esw_warn(esw->dev,
"Failed to add MAC(%pM) to vport[%d] DB\n",
- mac_list[i], vport_num);
+ mac_list[i], vport->vport);
continue;
}
- addr->vport = vport_num;
+ addr->vport = vport->vport;
addr->action = MLX5_ACTION_ADD;
}
out:
@@ -766,9 +761,9 @@ out:
/* Sync vport UC/MC list from vport context
* Must be called after esw_update_vport_addr_list
*/
-static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct l2addr_node *node;
struct vport_addr *addr;
struct hlist_head *hash;
@@ -791,20 +786,20 @@ static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u16 vport_num)
if (!addr) {
esw_warn(esw->dev,
"Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
- mac, vport_num);
+ mac, vport->vport);
continue;
}
- addr->vport = vport_num;
+ addr->vport = vport->vport;
addr->action = MLX5_ACTION_ADD;
addr->mc_promisc = true;
}
}
/* Apply vport rx mode to HW FDB table */
-static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
bool promisc, bool mc_promisc)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;
if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
@@ -812,7 +807,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num,
if (mc_promisc) {
vport->allmulti_rule =
- esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+ esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
if (!allmulti_addr->uplink_rule)
allmulti_addr->uplink_rule =
esw_fdb_set_vport_allmulti_rule(esw,
@@ -835,8 +830,8 @@ promisc:
return;
if (promisc) {
- vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
- vport_num);
+ vport->promisc_rule =
+ esw_fdb_set_vport_promisc_rule(esw, vport->vport);
} else if (vport->promisc_rule) {
mlx5_del_flow_rules(vport->promisc_rule);
vport->promisc_rule = NULL;
@@ -844,23 +839,23 @@ promisc:
}
/* Sync vport rx mode from vport context */
-static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int promisc_all = 0;
int promisc_uc = 0;
int promisc_mc = 0;
int err;
err = mlx5_query_nic_vport_promisc(esw->dev,
- vport_num,
+ vport->vport,
&promisc_uc,
&promisc_mc,
&promisc_all);
if (err)
return;
esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
- vport_num, promisc_all, promisc_mc);
+ vport->vport, promisc_all, promisc_mc);
if (!vport->info.trusted || !vport->enabled) {
promisc_uc = 0;
@@ -868,7 +863,7 @@ static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u16 vport_num)
promisc_all = 0;
}
- esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+ esw_apply_vport_rx_mode(esw, vport, promisc_all,
(promisc_all || promisc_mc));
}
@@ -883,27 +878,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
vport->vport, mac);
if (vport->enabled_events & UC_ADDR_CHANGE) {
- esw_update_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_UC);
- esw_apply_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_UC);
+ esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
+ esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
- if (vport->enabled_events & MC_ADDR_CHANGE) {
- esw_update_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_MC);
- }
+ if (vport->enabled_events & MC_ADDR_CHANGE)
+ esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
if (vport->enabled_events & PROMISC_CHANGE) {
- esw_update_vport_rx_mode(esw, vport->vport);
+ esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
- esw_update_vport_mc_promisc(esw, vport->vport);
+ esw_update_vport_mc_promisc(esw, vport);
}
- if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
- esw_apply_vport_addr_list(esw, vport->vport,
- MLX5_NVPRT_LIST_TYPE_MC);
- }
+ if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
+ esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
if (vport->enabled)
@@ -922,8 +911,8 @@ static void esw_vport_change_handler(struct work_struct *work)
mutex_unlock(&esw->state_lock);
}
-static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *vlan_grp = NULL;
@@ -1006,8 +995,8 @@ out:
return err;
}
-static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
mlx5_del_flow_rules(vport->egress.allowed_vlan);
@@ -1019,8 +1008,8 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
vport->egress.drop_rule = NULL;
}
-static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->egress.acl))
return;
@@ -1036,8 +1025,8 @@ static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
vport->egress.acl = NULL;
}
-static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -1168,8 +1157,8 @@ out:
return err;
}
-static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
mlx5_del_flow_rules(vport->ingress.drop_rule);
@@ -1181,8 +1170,8 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
vport->ingress.allow_rule = NULL;
}
-static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
if (IS_ERR_OR_NULL(vport->ingress.acl))
return;
@@ -1420,10 +1409,10 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
esw->qos.enabled = false;
}
-static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
u32 initial_max_rate, u32 initial_bw_share)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
@@ -1440,7 +1429,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
- MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+ MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@@ -1453,7 +1442,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
&vport->qos.esw_tsar_ix);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
- vport_num, err);
+ vport->vport, err);
return err;
}
@@ -1461,10 +1450,10 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
return 0;
}
-static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
- int err = 0;
+ int err;
if (!vport->qos.enabled)
return;
@@ -1474,15 +1463,15 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
vport->qos.esw_tsar_ix);
if (err)
esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
- vport_num, err);
+ vport->vport, err);
vport->qos.enabled = false;
}
-static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+static int esw_vport_qos_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
u32 max_rate, u32 bw_share)
{
- struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
void *vport_elem;
@@ -1499,7 +1488,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
element_attributes);
- MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+ MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
@@ -1515,7 +1504,7 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
bitmask);
if (err) {
esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
- vport_num, err);
+ vport->vport, err);
return err;
}
@@ -1618,7 +1607,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_apply_vport_conf(esw, vport);
/* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
+ if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
@@ -1663,7 +1652,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- esw_vport_disable_qos(esw, vport_num);
+ esw_vport_disable_qos(esw, vport);
if (esw->manager_vport != vport_num &&
esw->mode == SRIOV_LEGACY) {
mlx5_modify_vport_admin_state(esw->dev,
@@ -1688,6 +1677,9 @@ static int eswitch_vport_event(struct notifier_block *nb,
vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return NOTIFY_OK;
+
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
@@ -1922,22 +1914,19 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
}
/* Vport Administration */
-#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u64 node_guid;
int err = 0;
- if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
- return -EPERM;
- if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+ if (is_multicast_ether_addr(mac))
return -EINVAL;
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
if (evport->info.spoofchk && !is_valid_ether_addr(mac))
mlx5_core_warn(esw->dev,
@@ -1972,16 +1961,15 @@ unlock:
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
err = mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
@@ -2003,14 +1991,10 @@ unlock:
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
- struct mlx5_vport *evport;
-
- if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
- return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
- evport = &esw->vports[vport];
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@@ -2032,16 +2016,17 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos, u8 set_flags)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
+ if (vlan > 4095 || qos > 7)
return -EINVAL;
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
if (err)
@@ -2075,17 +2060,16 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
bool pschk;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
pschk = evport->info.spoofchk;
evport->info.spoofchk = spoofchk;
if (pschk && !is_valid_ether_addr(evport->info.mac))
@@ -2226,15 +2210,14 @@ out:
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport, bool setting)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
evport->info.trusted = setting;
if (evport->enabled)
esw_vport_change_handle_locked(evport);
@@ -2284,7 +2267,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
if (bw_share == evport->qos.bw_share)
continue;
- err = esw_vport_qos_config(esw, evport->vport, vport_max_rate,
+ err = esw_vport_qos_config(esw, evport, vport_max_rate,
bw_share);
if (!err)
evport->qos.bw_share = bw_share;
@@ -2298,7 +2281,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
u32 max_rate, u32 min_rate)
{
- struct mlx5_vport *evport;
+ struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
u32 fw_max_bw_share;
u32 previous_min_rate;
u32 divider;
@@ -2308,8 +2291,8 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
if (!ESW_ALLOWED(esw))
return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(evport))
+ return PTR_ERR(evport);
fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
@@ -2320,7 +2303,6 @@ int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
return -EOPNOTSUPP;
mutex_lock(&esw->state_lock);
- evport = &esw->vports[vport];
if (min_rate == evport->info.min_rate)
goto set_max_rate;
@@ -2338,7 +2320,7 @@ set_max_rate:
if (max_rate == evport->info.max_rate)
goto unlock;
- err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
+ err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
if (!err)
evport->info.max_rate = max_rate;
@@ -2348,11 +2330,10 @@ unlock:
}
static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
- int vport_idx,
+ struct mlx5_vport *vport,
struct mlx5_vport_drop_stats *stats)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_vport *vport = &esw->vports[vport_idx];
u64 rx_discard_vport_down, tx_discard_vport_down;
u64 bytes = 0;
int err = 0;
@@ -2372,7 +2353,7 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
!MLX5_CAP_GEN(dev, transmit_discard_vport_down))
return 0;
- err = mlx5_query_vport_down_stats(dev, vport_idx, 1,
+ err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
&rx_discard_vport_down,
&tx_discard_vport_down);
if (err)
@@ -2387,19 +2368,18 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
- int vport,
+ int vport_num,
struct ifla_vf_stats *vf_stats)
{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
struct mlx5_vport_drop_stats stats = {0};
int err = 0;
u32 *out;
- if (!ESW_ALLOWED(esw))
- return -EPERM;
- if (!LEGAL_VPORT(esw, vport))
- return -EINVAL;
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
@@ -2408,7 +2388,7 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
MLX5_SET(query_vport_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_VPORT_COUNTER);
MLX5_SET(query_vport_counter_in, in, op_mod, 0);
- MLX5_SET(query_vport_counter_in, in, vport_number, vport);
+ MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
MLX5_SET(query_vport_counter_in, in, other_vport, 1);
memset(out, 0, outlen);
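
The eswitch.c hunks convert mlx5_eswitch_get_vport() from a WARN_ON()-based lookup into one that returns an encoded error pointer, and every caller now checks IS_ERR() before use. A self-contained userspace sketch of that calling convention; err_ptr()/is_err()/ptr_err() below are stand-ins for the kernel helpers, and the table size is arbitrary.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *err_ptr(long err)       { return (void *)err; }
static inline int   is_err(const void *p)   { return (uintptr_t)p >= (uintptr_t)-4095; }
static inline long  ptr_err(const void *p)  { return (long)p; }

struct vport { int num; };

static struct vport table[4] = { {0}, {1}, {2}, {3} };

/* Encode -errno in the returned pointer instead of WARN()ing on a bad index. */
static struct vport *get_vport(unsigned int idx)
{
	if (idx >= 4)
		return err_ptr(-EINVAL);
	return &table[idx];
}

int main(void)
{
	struct vport *v = get_vport(7);

	if (is_err(v)) {
		printf("lookup failed: %ld\n", ptr_err(v));
		return 1;
	}
	printf("vport %d\n", v->num);
	return 0;
}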
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3f3cd32ae60a..ed3fad689ec9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -227,6 +227,18 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
int total_nvports);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -376,11 +388,11 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
-#define esw_info(dev, format, ...) \
- pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+#define esw_info(__dev, format, ...) \
+ dev_info((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
-#define esw_warn(dev, format, ...) \
- pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+#define esw_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "E-Switch: " format, ##__VA_ARGS__)
#define esw_debug(dev, format, ...) \
mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
@@ -431,6 +443,54 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
return index;
}
+/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+
+/* The vport getter/iterator are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_vports(esw, i, vport) \
+ for ((i) = MLX5_VPORT_PF; \
+ (vport) = &(esw)->vports[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (vport) = &(esw)->vports[(i)], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, nvfs) \
+ for ((i) = (nvfs); \
+ (vport) = &(esw)->vports[(i)], \
+ (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+
+/* The rep getter/iterator are only valid after esw->total_vports
+ * and vport->vport are initialized in mlx5_eswitch_init.
+ */
+#define mlx5_esw_for_all_reps(esw, i, rep) \
+ for ((i) = MLX5_VPORT_PF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) < (esw)->total_vports; (i)++)
+
+#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
+ for ((i) = MLX5_VPORT_FIRST_VF; \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) <= (nvfs); (i)++)
+
+#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
+ for ((i) = (nvfs); \
+ (rep) = &(esw)->offloads.vport_reps[i], \
+ (i) >= MLX5_VPORT_FIRST_VF; (i)--)
+
+#define mlx5_esw_for_each_vf_vport_num(esw, vport, nvfs) \
+ for ((vport) = MLX5_VPORT_FIRST_VF; (vport) <= (nvfs); (vport)++)
+
+#define mlx5_esw_for_each_vf_vport_num_reverse(esw, vport, nvfs) \
+ for ((vport) = (nvfs); (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
+
+struct mlx5_vport *__must_check
+mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9b2d78ee22b8..e09ae27485ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -37,17 +37,13 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "ecpf.h"
#include "lib/eq.h"
-enum {
- FDB_FAST_PATH = 0,
- FDB_SLOW_PATH
-};
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -58,32 +54,6 @@ enum {
#define UPLINK_REP_INDEX 0
-/* The rep getter/iterator are only valid after esw->total_vports
- * and vport->vport are initialized in mlx5_eswitch_init.
- */
-#define mlx5_esw_for_all_reps(esw, i, rep) \
- for ((i) = MLX5_VPORT_PF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) < (esw)->total_vports; (i)++)
-
-#define mlx5_esw_for_each_vf_rep(esw, i, rep, nvfs) \
- for ((i) = MLX5_VPORT_FIRST_VF; \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) <= (nvfs); (i)++)
-
-#define mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvfs) \
- for ((i) = (nvfs); \
- (rep) = &(esw)->offloads.vport_reps[i], \
- (i) >= MLX5_VPORT_FIRST_VF; (i)--)
-
-#define mlx5_esw_for_each_vf_vport(esw, vport, nvfs) \
- for ((vport) = MLX5_VPORT_FIRST_VF; \
- (vport) <= (nvfs); (vport)++)
-
-#define mlx5_esw_for_each_vf_vport_reverse(esw, vport, nvfs) \
- for ((vport) = (nvfs); \
- (vport) >= MLX5_VPORT_FIRST_VF; (vport)--)
-
static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
u16 vport_num)
{
@@ -363,7 +333,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
rep = &esw->offloads.vport_reps[vf_vport];
- if (rep->rep_if[REP_ETH].state != REP_LOADED)
+ if (atomic_read(&rep->rep_if[REP_ETH].state) != REP_LOADED)
continue;
err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -663,7 +633,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[mlx5_eswitch_ecpf_idx(esw)] = flow;
}
- mlx5_esw_for_each_vf_vport(esw, i, mlx5_core_max_vfs(esw->dev)) {
+ mlx5_esw_for_each_vf_vport_num(esw, i, mlx5_core_max_vfs(esw->dev)) {
MLX5_SET(fte_match_set_misc, misc, source_port, i);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
spec, &flow_act, &dest, 1);
@@ -681,7 +651,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
add_vf_flow_err:
nvports = --i;
- mlx5_esw_for_each_vf_vport_reverse(esw, i, nvports)
+ mlx5_esw_for_each_vf_vport_num_reverse(esw, i, nvports)
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
@@ -704,7 +674,8 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
flows = esw->fdb_table.offloads.peer_miss_rules;
- mlx5_esw_for_each_vf_vport_reverse(esw, i, mlx5_core_max_vfs(esw->dev))
+ mlx5_esw_for_each_vf_vport_num_reverse(esw, i,
+ mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[i]);
if (mlx5_ecpf_vport_exists(esw->dev))
@@ -1287,13 +1258,13 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
- int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
u8 hw_id[ETH_ALEN], rep_type;
int vport;
- esw->offloads.vport_reps = kcalloc(total_vfs,
+ esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
GFP_KERNEL);
if (!esw->offloads.vport_reps)
@@ -1306,7 +1277,8 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
ether_addr_copy(rep->hw_id, hw_id);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- rep->rep_if[rep_type].state = REP_UNREGISTERED;
+ atomic_set(&rep->rep_if[rep_type].state,
+ REP_UNREGISTERED);
}
return 0;
@@ -1315,11 +1287,9 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw)
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
- if (rep->rep_if[rep_type].state != REP_LOADED)
- return;
-
- rep->rep_if[rep_type].unload(rep);
- rep->rep_if[rep_type].state = REP_REGISTERED;
+ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ REP_LOADED, REP_REGISTERED) == REP_LOADED)
+ rep->rep_if[rep_type].unload(rep);
}
static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1380,16 +1350,15 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
{
int err = 0;
- if (rep->rep_if[rep_type].state != REP_REGISTERED)
- return 0;
-
- err = rep->rep_if[rep_type].load(esw->dev, rep);
- if (err)
- return err;
-
- rep->rep_if[rep_type].state = REP_LOADED;
+ if (atomic_cmpxchg(&rep->rep_if[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
+ err = rep->rep_if[rep_type].load(esw->dev, rep);
+ if (err)
+ atomic_set(&rep->rep_if[rep_type].state,
+ REP_REGISTERED);
+ }
- return 0;
+ return err;
}
static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
@@ -1523,8 +1492,6 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
return 0;
}
-void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
-
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
mlx5e_tc_clean_fdb_peer_flows(esw);
@@ -1607,6 +1574,169 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
+static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) Untagged packets - push prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+ return -EOPNOTSUPP;
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ err = esw_vport_enable_ingress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable prio tag ingress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules\n", vport->vport);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out_no_mem;
+ }
+
+ /* Untagged packets - push prio tag VLAN, allow */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_act.vlan[0].ethtype = ETH_P_8021Q;
+ flow_act.vlan[0].vid = 0;
+ flow_act.vlan[0].prio = 0;
+ vport->ingress.allow_rule =
+ mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress untagged allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+out_no_mem:
+ if (err)
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ return err;
+}
+
+static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ /* For prio tag mode, there is only 1 FTE:
+ * 1) prio tag packets - pop the prio tag VLAN, allow
+ * Unmatched traffic is allowed by default
+ */
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ err = esw_vport_enable_egress_acl(esw, vport);
+ if (err) {
+ mlx5_core_warn(esw->dev,
+ "failed to enable egress acl (%d) on vport[%d]\n",
+ err, vport->vport);
+ return err;
+ }
+
+ esw_debug(esw->dev,
+ "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out_no_mem;
+ }
+
+ /* prio tag vlan rule - pop it so VF receives untagged packets */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ esw_warn(esw->dev,
+ "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+out_no_mem:
+ if (err)
+ esw_vport_cleanup_egress_rules(esw, vport);
+ return err;
+}
+
+static int esw_prio_tag_acls_config(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_vport *vport = NULL;
+ int i, j;
+ int err;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, nvports) {
+ err = esw_vport_ingress_prio_tag_config(esw, vport);
+ if (err)
+ goto err_ingress;
+ err = esw_vport_egress_prio_tag_config(esw, vport);
+ if (err)
+ goto err_egress;
+ }
+
+ return 0;
+
+err_egress:
+ esw_vport_disable_ingress_acl(esw, vport);
+err_ingress:
+ mlx5_esw_for_each_vf_vport_reverse(esw, j, vport, i - 1) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
+
+ return err;
+}
+
+static void esw_prio_tag_acls_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
+}
+
static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
@@ -1614,6 +1744,12 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
+ err = esw_prio_tag_acls_config(esw, nvports);
+ if (err)
+ return err;
+ }
+
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
return err;
@@ -1642,6 +1778,8 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_vport_rx_group(esw);
esw_destroy_offloads_table(esw);
esw_destroy_offloads_fdb_tables(esw);
+ if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
+ esw_prio_tag_acls_cleanup(esw);
}
static void esw_host_params_event_handler(struct work_struct *work)
@@ -1700,8 +1838,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
{
int err;
- mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
-
err = esw_offloads_steering_init(esw, total_nvports);
if (err)
return err;
@@ -1719,6 +1855,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
esw->host_info.num_vfs = vf_nvports;
}
+ mlx5_rdma_enable_roce(esw->dev);
+
return 0;
err_reps:
@@ -1757,6 +1895,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
num_vfs = esw->dev->priv.sriov.num_vfs;
}
+ mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw, num_vfs);
esw_offloads_steering_cleanup(esw);
@@ -2076,7 +2215,7 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
rep_if->get_proto_dev = __rep_if->get_proto_dev;
rep_if->priv = __rep_if->priv;
- rep_if->state = REP_REGISTERED;
+ atomic_set(&rep_if->state, REP_REGISTERED);
}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
@@ -2091,7 +2230,7 @@ void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
__unload_reps_all_vport(esw, max_vf, rep_type);
mlx5_esw_for_all_reps(esw, i, rep)
- rep->rep_if[rep_type].state = REP_UNREGISTERED;
+ atomic_set(&rep->rep_if[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
@@ -2111,7 +2250,7 @@ void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
rep = mlx5_eswitch_get_rep(esw, vport);
- if (rep->rep_if[rep_type].state == REP_LOADED &&
+ if (atomic_read(&rep->rep_if[rep_type].state) == REP_LOADED &&
rep->rep_if[rep_type].get_proto_dev)
return rep->rep_if[rep_type].get_proto_dev(rep);
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 5d5864e8df3c..a81e8d2168d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -21,6 +21,7 @@ struct mlx5_event_nb {
static int any_notifier(struct notifier_block *, unsigned long, void *);
static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);
+static int pcie_core(struct notifier_block *, unsigned long, void *);
/* handler which forwards the event to events->nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);
@@ -30,6 +31,7 @@ static struct mlx5_nb events_nbs_ref[] = {
{.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY },
{.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT },
{.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT },
+ {.nb.notifier_call = pcie_core, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT },
/* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */
{.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE },
@@ -51,11 +53,14 @@ static struct mlx5_nb events_nbs_ref[] = {
struct mlx5_events {
struct mlx5_core_dev *dev;
+ struct workqueue_struct *wq;
struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
/* driver notifier chain */
struct atomic_notifier_head nh;
/* port module events stats */
struct mlx5_pme_stats pme_stats;
+ /* pcie_core */
+ struct work_struct pcie_core_work;
};
static const char *eqe_type_str(u8 type)
@@ -249,6 +254,69 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data
return NOTIFY_OK;
}
+enum {
+ MLX5_PCI_POWER_COULD_NOT_BE_READ = 0x0,
+ MLX5_PCI_POWER_SUFFICIENT_REPORTED = 0x1,
+ MLX5_PCI_POWER_INSUFFICIENT_REPORTED = 0x2,
+};
+
+static void mlx5_pcie_event(struct work_struct *work)
+{
+ u32 out[MLX5_ST_SZ_DW(mpein_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mpein_reg)] = {0};
+ struct mlx5_events *events;
+ struct mlx5_core_dev *dev;
+ u8 power_status;
+ u16 pci_power;
+
+ events = container_of(work, struct mlx5_events, pcie_core_work);
+ dev = events->dev;
+
+ if (!MLX5_CAP_MCAM_FEATURE(dev, pci_status_and_power))
+ return;
+
+ mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MPEIN, 0, 0);
+ power_status = MLX5_GET(mpein_reg, out, pwr_status);
+ pci_power = MLX5_GET(mpein_reg, out, pci_power);
+
+ switch (power_status) {
+ case MLX5_PCI_POWER_COULD_NOT_BE_READ:
+ mlx5_core_info_rl(dev,
+ "PCIe slot power capability was not advertised.\n");
+ break;
+ case MLX5_PCI_POWER_INSUFFICIENT_REPORTED:
+ mlx5_core_warn_rl(dev,
+ "Detected insufficient power on the PCIe slot (%uW).\n",
+ pci_power);
+ break;
+ case MLX5_PCI_POWER_SUFFICIENT_REPORTED:
+ mlx5_core_info_rl(dev,
+ "PCIe slot advertised sufficient power (%uW).\n",
+ pci_power);
+ break;
+ }
+}
+
+static int pcie_core(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb,
+ struct mlx5_event_nb,
+ nb);
+ struct mlx5_events *events = event_nb->ctx;
+ struct mlx5_eqe *eqe = data;
+
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT:
+ queue_work(events->wq, &events->pcie_core_work);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats)
{
*stats = dev->priv.events->pme_stats;
@@ -277,11 +345,17 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
events->dev = dev;
dev->priv.events = events;
+ events->wq = create_singlethread_workqueue("mlx5_events");
+ if (!events->wq)
+ return -ENOMEM;
+ INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+
return 0;
}
void mlx5_events_cleanup(struct mlx5_core_dev *dev)
{
+ destroy_workqueue(dev->priv.events->wq);
kvfree(dev->priv.events);
}
@@ -304,6 +378,7 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--)
mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb);
+ flush_workqueue(events->wq);
}
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 873541ef4c1b..ca2296a2f9ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -135,7 +135,7 @@ static void mlx5_fpga_conn_notify_hw(struct mlx5_fpga_conn *conn, void *wqe)
*conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
/* Make sure that doorbell record is visible before ringing */
wmb();
- mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET, NULL);
+ mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET);
}
static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 7e2e871dbf83..52c9dee91ea4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -37,6 +37,7 @@
#include <linux/mlx5/eq.h>
+#include "mlx5_core.h"
#include "lib/eq.h"
#include "fpga/cmd.h"
@@ -62,26 +63,26 @@ struct mlx5_fpga_device {
};
#define mlx5_fpga_dbg(__adev, format, ...) \
- dev_dbg(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, ##__VA_ARGS__)
+ mlx5_core_dbg((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
#define mlx5_fpga_err(__adev, format, ...) \
- dev_err(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, ##__VA_ARGS__)
+ mlx5_core_err((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
#define mlx5_fpga_warn(__adev, format, ...) \
- dev_warn(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, ##__VA_ARGS__)
+ mlx5_core_warn((__adev)->mdev, "FPGA: %s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, ##__VA_ARGS__)
#define mlx5_fpga_warn_ratelimited(__adev, format, ...) \
- dev_warn_ratelimited(&(__adev)->mdev->pdev->dev, "FPGA: %s:%d: " \
- format, __func__, __LINE__, ##__VA_ARGS__)
+ mlx5_core_err_rl((__adev)->mdev, "FPGA: %s:%d: " \
+ format, __func__, __LINE__, ##__VA_ARGS__)
#define mlx5_fpga_notice(__adev, format, ...) \
- dev_notice(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+ mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
#define mlx5_fpga_info(__adev, format, ...) \
- dev_info(&(__adev)->mdev->pdev->dev, "FPGA: " format, ##__VA_ARGS__)
+ mlx5_core_info((__adev)->mdev, "FPGA: " format, ##__VA_ARGS__)
int mlx5_fpga_init(struct mlx5_core_dev *mdev);
void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5a22c5874f3b..52c47d3dd5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -989,32 +989,33 @@ static enum fs_flow_table_type egress_to_fs_ft(bool egress)
return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}
-static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id,
+ struct mlx5_flow_group *fg,
bool is_egress)
{
- int (*create_flow_group)(struct mlx5_core_dev *dev,
+ int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 *in,
- unsigned int *group_id) =
+ struct mlx5_flow_group *fg) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
match_criteria.misc_parameters);
+ struct mlx5_core_dev *dev = ns->dev;
u32 saved_outer_esp_spi_mask;
u8 match_criteria_enable;
int ret;
if (MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
- return create_flow_group(dev, ft, in, group_id);
+ return create_flow_group(ns, ft, in, fg);
match_criteria_enable =
MLX5_GET(create_flow_group_in, in, match_criteria_enable);
saved_outer_esp_spi_mask =
MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
if (!match_criteria_enable || !saved_outer_esp_spi_mask)
- return create_flow_group(dev, ft, in, group_id);
+ return create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);
@@ -1023,7 +1024,7 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);
- ret = create_flow_group(dev, ft, in, group_id);
+ ret = create_flow_group(ns, ft, in, fg);
MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);
@@ -1031,17 +1032,18 @@ static int fpga_ipsec_fs_create_flow_group(struct mlx5_core_dev *dev,
return ret;
}
-static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte,
bool is_egress)
{
- int (*create_fte)(struct mlx5_core_dev *dev,
+ int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
+ struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
@@ -1053,7 +1055,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return create_fte(dev, ft, fg, fte);
+ return create_fte(ns, ft, fg, fte);
rule = kzalloc(sizeof(*rule), GFP_KERNEL);
if (!rule)
@@ -1070,7 +1072,7 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
WARN_ON(rule_insert(fipsec, rule));
modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = create_fte(dev, ft, fg, fte);
+ ret = create_fte(ns, ft, fg, fte);
restore_spec_mailbox(fte, &mbox_mod);
if (ret) {
_rule_delete(fipsec, rule);
@@ -1081,19 +1083,20 @@ static int fpga_ipsec_fs_create_fte(struct mlx5_core_dev *dev,
return ret;
}
-static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte,
bool is_egress)
{
- int (*update_fte)(struct mlx5_core_dev *dev,
+ int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
+ struct mlx5_core_dev *dev = ns->dev;
bool is_esp = fte->action.esp_id;
struct mailbox_mod mbox_mod;
int ret;
@@ -1102,24 +1105,25 @@ static int fpga_ipsec_fs_update_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return update_fte(dev, ft, group_id, modify_mask, fte);
+ return update_fte(ns, ft, fg, modify_mask, fte);
modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = update_fte(dev, ft, group_id, modify_mask, fte);
+ ret = update_fte(ns, ft, fg, modify_mask, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
-static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
+static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte,
bool is_egress)
{
- int (*delete_fte)(struct mlx5_core_dev *dev,
+ int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte) =
mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
+ struct mlx5_core_dev *dev = ns->dev;
struct mlx5_fpga_device *fdev = dev->fpga;
struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
struct mlx5_fpga_ipsec_rule *rule;
@@ -1131,7 +1135,7 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
!(fte->action.action &
(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
- return delete_fte(dev, ft, fte);
+ return delete_fte(ns, ft, fte);
rule = rule_search(fipsec, fte);
if (!rule)
@@ -1141,84 +1145,84 @@ static int fpga_ipsec_fs_delete_fte(struct mlx5_core_dev *dev,
rule_delete(fipsec, rule);
modify_spec_mailbox(dev, fte, &mbox_mod);
- ret = delete_fte(dev, ft, fte);
+ ret = delete_fte(ns, ft, fte);
restore_spec_mailbox(fte, &mbox_mod);
return ret;
}
static int
-mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
- return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, true);
+ return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}
static int
-mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, true);
+ return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}
static int
-mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
+ return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
true);
}
static int
-mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_delete_fte(dev, ft, fte, true);
+ return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}
static int
-mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
- return fpga_ipsec_fs_create_flow_group(dev, ft, in, group_id, false);
+ return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}
static int
-mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_create_fte(dev, ft, fg, fte, false);
+ return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}
static int
-mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_update_fte(dev, ft, group_id, modify_mask, fte,
+ return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
false);
}
static int
-mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_core_dev *dev,
+mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
- return fpga_ipsec_fs_delete_fte(dev, ft, fte, false);
+ return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}
static struct mlx5_flow_cmds fpga_ipsec_ingress;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c44ccb67c4a3..013b1ca4a791 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -39,7 +39,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
-static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect)
@@ -47,47 +47,43 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_core_dev *dev,
return 0;
}
-static int mlx5_cmd_stub_create_flow_table(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type,
- unsigned int level,
+static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
unsigned int log_size,
- struct mlx5_flow_table *next_ft,
- unsigned int *table_id, u32 flags)
+ struct mlx5_flow_table *next_ft)
{
return 0;
}
-static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
return 0;
}
-static int mlx5_cmd_stub_modify_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
return 0;
}
-static int mlx5_cmd_stub_create_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
return 0;
}
-static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id)
+ struct mlx5_flow_group *fg)
{
return 0;
}
-static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
@@ -95,28 +91,29 @@ static int mlx5_cmd_stub_create_fte(struct mlx5_core_dev *dev,
return 0;
}
-static int mlx5_cmd_stub_update_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *group,
int modify_mask,
struct fs_fte *fte)
{
return -EOPNOTSUPP;
}
-static int mlx5_cmd_stub_delete_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
return 0;
}
-static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
+static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
underlay_qpn == 0)
@@ -143,29 +140,26 @@ static int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type,
- unsigned int level,
+static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
unsigned int log_size,
- struct mlx5_flow_table *next_ft,
- unsigned int *table_id, u32 flags)
+ struct mlx5_flow_table *next_ft)
{
- int en_encap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
- int en_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
- MLX5_SET(create_flow_table_in, in, table_type, type);
- MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
- if (vport) {
- MLX5_SET(create_flow_table_in, in, vport_number, vport);
+ if (ft->vport) {
+ MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport, 1);
}
@@ -174,13 +168,18 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
en_encap);
- switch (op_mod) {
+ switch (ft->op_mod) {
case FS_FT_OP_MOD_NORMAL:
if (next_ft) {
MLX5_SET(create_flow_table_in, in,
- flow_table_context.table_miss_action, 1);
+ flow_table_context.table_miss_action,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(create_flow_table_in, in,
flow_table_context.table_miss_id, next_ft->id);
+ } else {
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_action,
+ ns->def_miss_action);
}
break;
@@ -195,16 +194,17 @@ static int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
- *table_id = MLX5_GET(create_flow_table_out, out,
- table_id);
+ ft->id = MLX5_GET(create_flow_table_out, out,
+ table_id);
return err;
}
-static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft)
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -218,12 +218,13 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
+static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft)
{
u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {0};
u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(modify_flow_table_in, in, opcode,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
@@ -250,26 +251,29 @@ static int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
MLX5_SET(modify_flow_table_in, in,
- flow_table_context.table_miss_action, 1);
+ flow_table_context.table_miss_action,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD);
MLX5_SET(modify_flow_table_in, in,
flow_table_context.table_miss_id,
next_ft->id);
} else {
MLX5_SET(modify_flow_table_in, in,
- flow_table_context.table_miss_action, 0);
+ flow_table_context.table_miss_action,
+ ns->def_miss_action);
}
}
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
-static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id)
+ struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = ns->dev;
int err;
MLX5_SET(create_flow_group_in, in, opcode,
@@ -283,23 +287,24 @@ static int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
if (!err)
- *group_id = MLX5_GET(create_flow_group_out, out,
- group_id);
+ fg->id = MLX5_GET(create_flow_group_out, out,
+ group_id);
return err;
}
-static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id)
+ struct mlx5_flow_group *fg)
{
u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
- MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
if (ft->vport) {
MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
@@ -505,23 +510,25 @@ err_out:
return err;
}
-static int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
struct fs_fte *fte)
{
+ struct mlx5_core_dev *dev = ns->dev;
unsigned int group_id = group->id;
return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
}
-static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte)
{
int opmod;
+ struct mlx5_core_dev *dev = ns->dev;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
@@ -529,15 +536,16 @@ static int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
return -EOPNOTSUPP;
opmod = 1;
- return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
+ return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
}
-static int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
{
u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
MLX5_SET(delete_fte_in, in, table_type, ft->type);
@@ -853,6 +861,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_SNIFFER_RX:
case FS_FT_SNIFFER_TX:
case FS_FT_NIC_TX:
+ case FS_FT_RDMA_RX:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 6228ba7bfa1a..e340f9af2f5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -36,45 +36,42 @@
#include "fs_core.h"
struct mlx5_flow_cmds {
- int (*create_flow_table)(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_flow_table_op_mod op_mod,
- enum fs_flow_table_type type,
- unsigned int level, unsigned int log_size,
- struct mlx5_flow_table *next_ft,
- unsigned int *table_id, u32 flags);
- int (*destroy_flow_table)(struct mlx5_core_dev *dev,
+ int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ unsigned int log_size,
+ struct mlx5_flow_table *next_ft);
+ int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
- int (*modify_flow_table)(struct mlx5_core_dev *dev,
+ int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_table *next_ft);
- int (*create_flow_group)(struct mlx5_core_dev *dev,
+ int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 *in,
- unsigned int *group_id);
+ struct mlx5_flow_group *fg);
- int (*destroy_flow_group)(struct mlx5_core_dev *dev,
+ int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id);
+ struct mlx5_flow_group *fg);
- int (*create_fte)(struct mlx5_core_dev *dev,
+ int (*create_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg,
struct fs_fte *fte);
- int (*update_fte)(struct mlx5_core_dev *dev,
+ int (*update_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int group_id,
+ struct mlx5_flow_group *fg,
int modify_mask,
struct fs_fte *fte);
- int (*delete_fte)(struct mlx5_core_dev *dev,
+ int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte);
- int (*update_root_ft)(struct mlx5_core_dev *dev,
+ int (*update_root_ft)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
u32 underlay_qpn,
bool disconnect);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0be3eb86dd84..fb5b61727ee7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -403,7 +403,7 @@ static void del_hw_flow_table(struct fs_node *node)
trace_mlx5_fs_del_ft(ft);
if (node->active) {
- err = root->cmds->destroy_flow_table(dev, ft);
+ err = root->cmds->destroy_flow_table(root, ft);
if (err)
mlx5_core_warn(dev, "flow steering can't destroy ft\n");
}
@@ -435,7 +435,7 @@ static void modify_fte(struct fs_fte *fte)
dev = get_dev(&fte->node);
root = find_root(&ft->node);
- err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
if (err)
mlx5_core_warn(dev,
"%s can't del rule fg id=%d fte_index=%d\n",
@@ -492,7 +492,7 @@ static void del_hw_fte(struct fs_node *node)
dev = get_dev(&ft->node);
root = find_root(&ft->node);
if (node->active) {
- err = root->cmds->delete_fte(dev, ft, fte);
+ err = root->cmds->delete_fte(root, ft, fte);
if (err)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
@@ -532,7 +532,7 @@ static void del_hw_flow_group(struct fs_node *node)
trace_mlx5_fs_del_fg(fg);
root = find_root(&ft->node);
- if (fg->node.active && root->cmds->destroy_flow_group(dev, ft, fg->id))
+ if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
fg->id, ft->id);
}
@@ -783,7 +783,7 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
fs_for_each_ft(iter, prio) {
i++;
- err = root->cmds->modify_flow_table(dev, iter, ft);
+ err = root->cmds->modify_flow_table(root, iter, ft);
if (err) {
mlx5_core_warn(dev, "Failed to modify flow table %d\n",
iter->id);
@@ -819,7 +819,7 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
struct mlx5_ft_underlay_qp *uqp;
int min_level = INT_MAX;
- int err;
+ int err = 0;
u32 qpn;
if (root->root_ft)
@@ -831,11 +831,11 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
- err = root->cmds->update_root_ft(root->dev, ft, qpn, false);
+ err = root->cmds->update_root_ft(root, ft, qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
- err = root->cmds->update_root_ft(root->dev, ft,
+ err = root->cmds->update_root_ft(root, ft,
qpn, false);
if (err)
break;
@@ -871,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
root = find_root(&ft->node);
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
+ err = root->cmds->update_fte(root, ft, fg,
modify_mask, fte);
up_write_ref_node(&fte->node, false);
@@ -1013,9 +1013,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
- err = root->cmds->create_flow_table(root->dev, ft->vport, ft->op_mod,
- ft->type, ft->level, log_table_sz,
- next_ft, &ft->id, ft->flags);
+ err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
if (err)
goto free_ft;
@@ -1032,7 +1030,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
trace_mlx5_fs_add_ft(ft);
return ft;
destroy_ft:
- root->cmds->destroy_flow_table(root->dev, ft);
+ root->cmds->destroy_flow_table(root, ft);
free_ft:
kfree(ft);
unlock_root:
@@ -1114,7 +1112,6 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
start_flow_index);
int end_index = MLX5_GET(create_flow_group_in, fg_in,
end_flow_index);
- struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
int err;
@@ -1129,7 +1126,7 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (IS_ERR(fg))
return fg;
- err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id);
+ err = root->cmds->create_flow_group(root, ft, fg_in, fg);
if (err) {
tree_put_node(&fg->node, false);
return ERR_PTR(err);
@@ -1269,11 +1266,9 @@ add_rule_fte(struct fs_fte *fte,
fs_get_obj(ft, fg->node.parent);
root = find_root(&fg->node);
if (!(fte->status & FS_FTE_STATUS_EXISTING))
- err = root->cmds->create_fte(get_dev(&ft->node),
- ft, fg, fte);
+ err = root->cmds->create_fte(root, ft, fg, fte);
else
- err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id,
- modify_mask, fte);
+ err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
if (err)
goto free_handle;
@@ -1339,7 +1334,6 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
struct mlx5_flow_group *fg)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
- struct mlx5_core_dev *dev = get_dev(&ft->node);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *match_criteria_addr;
u8 src_esw_owner_mask_on;
@@ -1369,7 +1363,7 @@ static int create_auto_flow_group(struct mlx5_flow_table *ft,
memcpy(match_criteria_addr, fg->mask.match_criteria,
sizeof(fg->mask.match_criteria));
- err = root->cmds->create_flow_group(dev, ft, in, &fg->id);
+ err = root->cmds->create_flow_group(root, ft, in, fg);
if (!err) {
fg->node.active = true;
trace_mlx5_fs_add_fg(fg);
@@ -1941,12 +1935,12 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
if (list_empty(&root->underlay_qpns)) {
/* Don't set any QPN (zero) in case QPN list is empty */
qpn = 0;
- err = root->cmds->update_root_ft(root->dev, new_root_ft,
+ err = root->cmds->update_root_ft(root, new_root_ft,
qpn, false);
} else {
list_for_each_entry(uqp, &root->underlay_qpns, list) {
qpn = uqp->qpn;
- err = root->cmds->update_root_ft(root->dev,
+ err = root->cmds->update_root_ft(root,
new_root_ft, qpn,
false);
if (err)
@@ -2060,6 +2054,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->sniffer_tx_root_ns)
return &steering->sniffer_tx_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ if (steering->rdma_rx_root_ns)
+ return &steering->rdma_rx_root_ns->ns;
+ return NULL;
default:
break;
}
@@ -2456,6 +2454,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_sub_ns = NULL;
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
@@ -2497,6 +2496,25 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return 0;
}
+static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
+ if (!steering->rdma_rx_root_ns)
+ return -ENOMEM;
+
+ steering->rdma_rx_root_ns->def_miss_action =
+ MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->rdma_rx_root_ns->ns, 0, 1);
+ if (IS_ERR(prio)) {
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns;
@@ -2516,8 +2534,16 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (!steering->fdb_sub_ns)
return -ENOMEM;
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
+ 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
- maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns, 0,
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+ FDB_FAST_PATH,
levels);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
@@ -2542,7 +2568,7 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
steering->fdb_sub_ns[chain] = ns;
}
- maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
goto out_err;
@@ -2725,6 +2751,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ err = init_rdma_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_IPSEC_DEV(dev) || MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
err = init_egress_root_ns(steering);
if (err)
@@ -2754,7 +2787,7 @@ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto update_ft_fail;
}
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
false);
if (err) {
mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
@@ -2798,7 +2831,7 @@ int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
goto out;
}
- err = root->cmds->update_root_ft(dev, root->root_ft, underlay_qpn,
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
true);
if (err)
mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 87de0e4d9124..a08c3d09a50f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -67,6 +67,7 @@ enum fs_flow_table_type {
FS_FT_FDB = 0X4,
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
};
@@ -90,6 +91,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace **esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
};
@@ -150,7 +152,7 @@ struct mlx5_ft_underlay_qp {
u32 qpn;
};
-#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_800
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_a00
/* Calculate the fte_match_param length without the reserved length.
* Make sure the reserved field is the last.
*/
@@ -216,6 +218,7 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
struct list_head underlay_qpns;
const struct mlx5_flow_cmds *cmds;
+ enum mlx5_flow_table_miss_action def_miss_action;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index cb9fa3430c53..a2656f4008d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -152,11 +152,11 @@ static void health_recover(struct work_struct *work)
nic_state = mlx5_get_nic_state(dev);
if (nic_state == MLX5_NIC_IFC_INVALID) {
- dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+ mlx5_core_err(dev, "health recovery flow aborted since the nic state is invalid\n");
return;
}
- dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+ mlx5_core_err(dev, "starting health recovery flow\n");
mlx5_recover_device(dev);
}
@@ -180,8 +180,8 @@ static void health_care(struct work_struct *work)
if (!test_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags))
schedule_delayed_work(&health->recover_work, recover_delay);
else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
+ mlx5_core_err(dev,
+ "new health works are not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@@ -228,18 +228,22 @@ static void print_health_info(struct mlx5_core_dev *dev)
return;
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+ mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
+ ioread32be(h->assert_var + i));
- dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
- dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
+ ioread32be(&h->assert_exit_ptr));
+ mlx5_core_err(dev, "assert_callra 0x%08x\n",
+ ioread32be(&h->assert_callra));
sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
- dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
- dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
- dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
- dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
- dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ mlx5_core_err(dev, "fw_ver %s\n", fw_str);
+ mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+ mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
+ hsynd_str(ioread8(&h->synd)));
+ mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
fw = ioread32be(&h->fw_ver);
- dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
+ mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
}
static unsigned long get_next_poll_jiffies(void)
@@ -262,8 +266,7 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
queue_work(health->wq, &health->work);
else
- dev_err(&dev->pdev->dev,
- "new health works are not permitted at this stage\n");
+ mlx5_core_err(dev, "new health works are not permitted at this stage\n");
spin_unlock_irqrestore(&health->wq_lock, flags);
}
@@ -284,7 +287,7 @@ static void poll_health(struct timer_list *t)
health->prev = count;
if (health->miss_counter == MAX_MISSES) {
- dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
+ mlx5_core_err(dev, "device's health compromised - reached miss count\n");
print_health_info(dev);
}
@@ -352,6 +355,13 @@ void mlx5_drain_health_recovery(struct mlx5_core_dev *dev)
cancel_delayed_work_sync(&dev->priv.health.recover_work);
}
+void mlx5_health_flush(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_health *health = &dev->priv.health;
+
+ flush_workqueue(health->wq);
+}
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -370,7 +380,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
return -ENOMEM;
strcpy(name, "mlx5_health");
- strcat(name, dev_name(&dev->pdev->dev));
+ strcat(name, dev_name(dev->device));
health->wq = create_singlethread_workqueue(name);
kfree(name);
if (!health->wq)
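Note: the new mlx5_health_flush() drains any queued health work without destroying the workqueue, unlike mlx5_health_cleanup(); the main.c hunk further down uses it on the path where mlx5_unload_one() fails inside remove_one(). A minimal sketch of the distinction, not the driver's actual call site:

/* Sketch only: drain vs. destroy the health workqueue. */
static void example_health_teardown(struct mlx5_core_dev *dev, bool unload_failed)
{
	if (unload_failed) {
		mlx5_health_flush(dev);		/* wait for queued work, keep the wq */
		return;
	}
	mlx5_health_cleanup(dev);		/* tear the wq down entirely */
}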
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 4eac42555c7d..ada1b7c0e0b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -68,6 +68,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
params->lro_en = false;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
+ params->tunneled_offload_en = false;
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
@@ -77,15 +78,14 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- u16 max_mtu;
int err;
err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
if (err)
return err;
- mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
- netdev->mtu = max_mtu;
+ mlx5e_set_netdev_mtu_boundaries(priv);
+ netdev->mtu = netdev->max_mtu;
mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params,
mlx5e_get_netdev_max_channels(netdev),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 5633f8572800..8212bfd05733 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -122,7 +122,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Handle add/replace event */
if (fi->fib_nhs == 1) {
if (__mlx5_lag_is_active(ldev)) {
- struct net_device *nh_dev = fi->fib_nh[0].nh_dev;
+ struct net_device *nh_dev = fi->fib_nh[0].fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
mlx5_lag_set_port_affinity(ldev, ++i);
@@ -134,10 +134,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
/* Verify next hops are ports of the same hca */
- if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev &&
- fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) &&
- !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev &&
- fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) {
+ if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev &&
+ fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) &&
+ !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev &&
+ fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) {
mlx5_core_warn(ldev->pf[0].dev, "Multipath offload requires two ports of the same HCA\n");
return;
}
@@ -167,7 +167,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->nh_dev);
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
if (i >= 0) {
i = (i + 1) % 2 + 1; /* peer port */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index 40f4a19b1ce1..be69c1d7941a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -80,10 +80,8 @@ void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
mlx5_query_port_tun_entropy(mdev, &entropy_flags);
tun_entropy->num_enabling_entries = 0;
tun_entropy->num_disabling_entries = 0;
- tun_entropy->enabled = entropy_flags.calc_enabled;
- tun_entropy->enabled =
- (entropy_flags.calc_supported) ?
- entropy_flags.calc_enabled : true;
+ tun_entropy->enabled = entropy_flags.calc_supported ?
+ entropy_flags.calc_enabled : true;
}
static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 9a8fd762167b..b9d4f4e19ff9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <net/vxlan.h>
#include "mlx5_core.h"
#include "vxlan.h"
@@ -204,8 +205,8 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
spin_lock_init(&vxlan->lock);
hash_init(vxlan->htable);
- /* Hardware adds 4789 by default */
- mlx5_vxlan_add_port(vxlan, 4789);
+ /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */
+ mlx5_vxlan_add_port(vxlan, IANA_VXLAN_UDP_PORT);
return vxlan;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 76716419370d..61fa1d162d28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -567,24 +567,23 @@ query_ex:
static int set_hca_cap(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
int err;
err = handle_hca_cap(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap failed\n");
+ mlx5_core_err(dev, "handle_hca_cap failed\n");
goto out;
}
err = handle_hca_cap_atomic(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+ mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
goto out;
}
err = handle_hca_cap_odp(dev);
if (err) {
- dev_err(&pdev->dev, "handle_hca_cap_odp failed\n");
+ mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
goto out;
}
@@ -716,36 +715,28 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return -EOPNOTSUPP;
}
-static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
- struct pci_dev *pdev = dev->pdev;
+ struct mlx5_priv *priv = &dev->priv;
int err = 0;
- pci_set_drvdata(dev->pdev, dev);
- strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
- priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
-
- mutex_init(&priv->pgdir_mutex);
- INIT_LIST_HEAD(&priv->pgdir_list);
- spin_lock_init(&priv->mkey_lock);
+ priv->pci_dev_data = id->driver_data;
- mutex_init(&priv->alloc_mutex);
+ pci_set_drvdata(dev->pdev, dev);
+ dev->bar_addr = pci_resource_start(pdev, 0);
priv->numa_node = dev_to_node(&dev->pdev->dev);
- if (mlx5_debugfs_root)
- priv->dbg_root =
- debugfs_create_dir(pci_name(pdev), mlx5_debugfs_root);
-
err = mlx5_pci_enable_device(dev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
- goto err_dbg;
+ mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
+ return err;
}
err = request_bar(pdev);
if (err) {
- dev_err(&pdev->dev, "error requesting BARs, aborting\n");
+ mlx5_core_err(dev, "error requesting BARs, aborting\n");
goto err_disable;
}
@@ -753,7 +744,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = set_dma_caps(pdev);
if (err) {
- dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
+ mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
goto err_clr_master;
}
@@ -762,11 +753,11 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
mlx5_core_dbg(dev, "Enabling pci atomics failed\n");
- dev->iseg_base = pci_resource_start(dev->pdev, 0);
+ dev->iseg_base = dev->bar_addr;
dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
if (!dev->iseg) {
err = -ENOMEM;
- dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
+ mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
goto err_clr_master;
}
@@ -777,52 +768,47 @@ err_clr_master:
release_bar(dev->pdev);
err_disable:
mlx5_pci_disable_device(dev);
-
-err_dbg:
- debugfs_remove(priv->dbg_root);
return err;
}
-static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static void mlx5_pci_close(struct mlx5_core_dev *dev)
{
iounmap(dev->iseg);
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
- debugfs_remove_recursive(priv->dbg_root);
}
-static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+static int mlx5_init_once(struct mlx5_core_dev *dev)
{
- struct pci_dev *pdev = dev->pdev;
int err;
- priv->devcom = mlx5_devcom_register_device(dev);
- if (IS_ERR(priv->devcom))
- dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n",
- priv->devcom);
+ dev->priv.devcom = mlx5_devcom_register_device(dev);
+ if (IS_ERR(dev->priv.devcom))
+ mlx5_core_err(dev, "failed to register with devcom (0x%p)\n",
+ dev->priv.devcom);
err = mlx5_query_board_id(dev);
if (err) {
- dev_err(&pdev->dev, "query board id failed\n");
+ mlx5_core_err(dev, "query board id failed\n");
goto err_devcom;
}
err = mlx5_eq_table_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize eq\n");
+ mlx5_core_err(dev, "failed to initialize eq\n");
goto err_devcom;
}
err = mlx5_events_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize events\n");
+ mlx5_core_err(dev, "failed to initialize events\n");
goto err_eq_cleanup;
}
err = mlx5_cq_debugfs_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
+ mlx5_core_err(dev, "failed to initialize cq debugfs\n");
goto err_events_cleanup;
}
@@ -838,31 +824,31 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
err = mlx5_init_rl_table(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ mlx5_core_err(dev, "Failed to init rate limiting\n");
goto err_tables_cleanup;
}
err = mlx5_mpfs_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init l2 table %d\n", err);
+ mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
goto err_rl_cleanup;
}
err = mlx5_eswitch_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+ mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
goto err_mpfs_cleanup;
}
err = mlx5_sriov_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+ mlx5_core_err(dev, "Failed to init sriov %d\n", err);
goto err_eswitch_cleanup;
}
err = mlx5_fpga_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init fpga device %d\n", err);
+ mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
goto err_sriov_cleanup;
}
@@ -912,93 +898,78 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
- bool boot)
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
{
- struct pci_dev *pdev = dev->pdev;
int err;
- dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
- mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
- __func__);
- goto out;
- }
-
- dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
- fw_rev_min(dev), fw_rev_sub(dev));
+ mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+ fw_rev_min(dev), fw_rev_sub(dev));
/* Only PFs hold the relevant PCIe information for this query */
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
- /* on load removing any previous indication of internal error, device is
- * up
- */
- dev->state = MLX5_DEVICE_STATE_UP;
-
/* wait for firmware to accept initialization segments configurations
*/
err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
if (err) {
- dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
- FW_PRE_INIT_TIMEOUT_MILI);
- goto out_err;
+ mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ return err;
}
err = mlx5_cmd_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
- goto out_err;
+ mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
+ return err;
}
err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
if (err) {
- dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
- FW_INIT_TIMEOUT_MILI);
+ mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
+ FW_INIT_TIMEOUT_MILI);
goto err_cmd_cleanup;
}
err = mlx5_core_enable_hca(dev, 0);
if (err) {
- dev_err(&pdev->dev, "enable hca failed\n");
+ mlx5_core_err(dev, "enable hca failed\n");
goto err_cmd_cleanup;
}
err = mlx5_core_set_issi(dev);
if (err) {
- dev_err(&pdev->dev, "failed to set issi\n");
+ mlx5_core_err(dev, "failed to set issi\n");
goto err_disable_hca;
}
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
- dev_err(&pdev->dev, "failed to allocate boot pages\n");
+ mlx5_core_err(dev, "failed to allocate boot pages\n");
goto err_disable_hca;
}
err = set_hca_ctrl(dev);
if (err) {
- dev_err(&pdev->dev, "set_hca_ctrl failed\n");
+ mlx5_core_err(dev, "set_hca_ctrl failed\n");
goto reclaim_boot_pages;
}
err = set_hca_cap(dev);
if (err) {
- dev_err(&pdev->dev, "set_hca_cap failed\n");
+ mlx5_core_err(dev, "set_hca_cap failed\n");
goto reclaim_boot_pages;
}
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
- dev_err(&pdev->dev, "failed to allocate init pages\n");
+ mlx5_core_err(dev, "failed to allocate init pages\n");
goto reclaim_boot_pages;
}
err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
- dev_err(&pdev->dev, "init hca failed\n");
+ mlx5_core_err(dev, "init hca failed\n");
goto reclaim_boot_pages;
}
@@ -1008,23 +979,50 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_query_hca_caps(dev);
if (err) {
- dev_err(&pdev->dev, "query hca failed\n");
- goto err_stop_poll;
+ mlx5_core_err(dev, "query hca failed\n");
+ goto stop_health;
}
- if (boot) {
- err = mlx5_init_once(dev, priv);
- if (err) {
- dev_err(&pdev->dev, "sw objs init failed\n");
- goto err_stop_poll;
- }
+ return 0;
+
+stop_health:
+ mlx5_stop_health_poll(dev, boot);
+reclaim_boot_pages:
+ mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+ mlx5_core_disable_hca(dev, 0);
+err_cmd_cleanup:
+ mlx5_cmd_cleanup(dev);
+
+ return err;
+}
+
+static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+{
+ int err;
+
+ mlx5_stop_health_poll(dev, boot);
+ err = mlx5_cmd_teardown_hca(dev);
+ if (err) {
+ mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
+ return err;
}
+ mlx5_reclaim_startup_pages(dev);
+ mlx5_core_disable_hca(dev, 0);
+ mlx5_cmd_cleanup(dev);
+
+ return 0;
+}
+
+static int mlx5_load(struct mlx5_core_dev *dev)
+{
+ int err;
dev->priv.uar = mlx5_get_uars_page(dev);
if (IS_ERR(dev->priv.uar)) {
- dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+ mlx5_core_err(dev, "Failed allocating uar, aborting\n");
err = PTR_ERR(dev->priv.uar);
- goto err_get_uars;
+ return err;
}
mlx5_events_start(dev);
@@ -1032,132 +1030,155 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
err = mlx5_eq_table_create(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to create EQs\n");
+ mlx5_core_err(dev, "Failed to create EQs\n");
goto err_eq_table;
}
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
- dev_err(&pdev->dev, "Failed to init FW tracer\n");
+ mlx5_core_err(dev, "Failed to init FW tracer\n");
goto err_fw_tracer;
}
err = mlx5_fpga_device_start(dev);
if (err) {
- dev_err(&pdev->dev, "fpga device start failed %d\n", err);
+ mlx5_core_err(dev, "fpga device start failed %d\n", err);
goto err_fpga_start;
}
err = mlx5_accel_ipsec_init(dev);
if (err) {
- dev_err(&pdev->dev, "IPSec device start failed %d\n", err);
+ mlx5_core_err(dev, "IPSec device start failed %d\n", err);
goto err_ipsec_start;
}
err = mlx5_accel_tls_init(dev);
if (err) {
- dev_err(&pdev->dev, "TLS device start failed %d\n", err);
+ mlx5_core_err(dev, "TLS device start failed %d\n", err);
goto err_tls_start;
}
err = mlx5_init_fs(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init flow steering\n");
+ mlx5_core_err(dev, "Failed to init flow steering\n");
goto err_fs;
}
err = mlx5_core_set_hca_defaults(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to set hca defaults\n");
+ mlx5_core_err(dev, "Failed to set hca defaults\n");
goto err_fs;
}
err = mlx5_sriov_attach(dev);
if (err) {
- dev_err(&pdev->dev, "sriov init failed %d\n", err);
+ mlx5_core_err(dev, "sriov init failed %d\n", err);
goto err_sriov;
}
err = mlx5_ec_init(dev);
if (err) {
- dev_err(&pdev->dev, "Failed to init embedded CPU\n");
+ mlx5_core_err(dev, "Failed to init embedded CPU\n");
goto err_ec;
}
- if (mlx5_device_registered(dev)) {
- mlx5_attach_device(dev);
- } else {
- err = mlx5_register_device(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
- goto err_reg_dev;
- }
- }
-
- set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
- mutex_unlock(&dev->intf_state_mutex);
-
return 0;
-err_reg_dev:
- mlx5_ec_cleanup(dev);
-
err_ec:
mlx5_sriov_detach(dev);
-
err_sriov:
mlx5_cleanup_fs(dev);
-
err_fs:
mlx5_accel_tls_cleanup(dev);
-
err_tls_start:
mlx5_accel_ipsec_cleanup(dev);
-
err_ipsec_start:
mlx5_fpga_device_stop(dev);
-
err_fpga_start:
mlx5_fw_tracer_cleanup(dev->tracer);
-
err_fw_tracer:
mlx5_eq_table_destroy(dev);
-
err_eq_table:
mlx5_pagealloc_stop(dev);
mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, priv->uar);
+ mlx5_put_uars_page(dev, dev->priv.uar);
+ return err;
+}
+
+static void mlx5_unload(struct mlx5_core_dev *dev)
+{
+ mlx5_ec_cleanup(dev);
+ mlx5_sriov_detach(dev);
+ mlx5_cleanup_fs(dev);
+ mlx5_accel_ipsec_cleanup(dev);
+ mlx5_accel_tls_cleanup(dev);
+ mlx5_fpga_device_stop(dev);
+ mlx5_fw_tracer_cleanup(dev->tracer);
+ mlx5_eq_table_destroy(dev);
+ mlx5_pagealloc_stop(dev);
+ mlx5_events_stop(dev);
+ mlx5_put_uars_page(dev, dev->priv.uar);
+}
-err_get_uars:
- if (boot)
- mlx5_cleanup_once(dev);
+static int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
+{
+ int err = 0;
-err_stop_poll:
- mlx5_stop_health_poll(dev, boot);
- if (mlx5_cmd_teardown_hca(dev)) {
- dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- goto out_err;
+ dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
+ mutex_lock(&dev->intf_state_mutex);
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+ mlx5_core_warn(dev, "interface is up, NOP\n");
+ goto out;
}
+ /* remove any previous indication of internal error */
+ dev->state = MLX5_DEVICE_STATE_UP;
-reclaim_boot_pages:
- mlx5_reclaim_startup_pages(dev);
+ err = mlx5_function_setup(dev, boot);
+ if (err)
+ goto out;
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
+ if (boot) {
+ err = mlx5_init_once(dev);
+ if (err) {
+ mlx5_core_err(dev, "sw objs init failed\n");
+ goto function_teardown;
+ }
+ }
-err_cmd_cleanup:
- mlx5_cmd_cleanup(dev);
+ err = mlx5_load(dev);
+ if (err)
+ goto err_load;
-out_err:
+ if (mlx5_device_registered(dev)) {
+ mlx5_attach_device(dev);
+ } else {
+ err = mlx5_register_device(dev);
+ if (err) {
+ mlx5_core_err(dev, "register device failed %d\n", err);
+ goto err_reg_dev;
+ }
+ }
+
+ set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+out:
+ mutex_unlock(&dev->intf_state_mutex);
+
+ return err;
+
+err_reg_dev:
+ mlx5_unload(dev);
+err_load:
+ if (boot)
+ mlx5_cleanup_once(dev);
+function_teardown:
+ mlx5_function_teardown(dev, boot);
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
return err;
}
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
- bool cleanup)
+static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
@@ -1166,8 +1187,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
- dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
- __func__);
+ mlx5_core_warn(dev, "%s: interface is down, NOP\n",
+ __func__);
if (cleanup)
mlx5_cleanup_once(dev);
goto out;
@@ -1178,30 +1199,12 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
if (mlx5_device_registered(dev))
mlx5_detach_device(dev);
- mlx5_ec_cleanup(dev);
- mlx5_sriov_detach(dev);
- mlx5_cleanup_fs(dev);
- mlx5_accel_ipsec_cleanup(dev);
- mlx5_accel_tls_cleanup(dev);
- mlx5_fpga_device_stop(dev);
- mlx5_fw_tracer_cleanup(dev->tracer);
- mlx5_eq_table_destroy(dev);
- mlx5_pagealloc_stop(dev);
- mlx5_events_stop(dev);
- mlx5_put_uars_page(dev, priv->uar);
+ mlx5_unload(dev);
+
if (cleanup)
mlx5_cleanup_once(dev);
- mlx5_stop_health_poll(dev, cleanup);
-
- err = mlx5_cmd_teardown_hca(dev);
- if (err) {
- dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
- goto out;
- }
- mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
- mlx5_cmd_cleanup(dev);
+ mlx5_function_teardown(dev, cleanup);
out:
mutex_unlock(&dev->intf_state_mutex);
return err;
@@ -1218,29 +1221,12 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
};
-#define MLX5_IB_MOD "mlx5_ib"
-static int init_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
- struct mlx5_core_dev *dev;
- struct devlink *devlink;
- struct mlx5_priv *priv;
+ struct mlx5_priv *priv = &dev->priv;
int err;
- devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
- if (!devlink) {
- dev_err(&pdev->dev, "kzalloc failed\n");
- return -ENOMEM;
- }
-
- dev = devlink_priv(devlink);
- priv = &dev->priv;
- priv->pci_dev_data = id->driver_data;
-
- pci_set_drvdata(pdev, dev);
-
- dev->pdev = pdev;
- dev->profile = &profile[prof_sel];
+ dev->profile = &profile[profile_idx];
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
@@ -1252,25 +1238,75 @@ static int init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
- err = mlx5_pci_init(dev, priv);
- if (err) {
- dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
- goto clean_dev;
+ mutex_init(&priv->alloc_mutex);
+ mutex_init(&priv->pgdir_mutex);
+ INIT_LIST_HEAD(&priv->pgdir_list);
+ spin_lock_init(&priv->mkey_lock);
+
+ priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
+ mlx5_debugfs_root);
+ if (!priv->dbg_root) {
+ dev_err(dev->device, "mlx5_core: error, Cannot create debugfs dir, aborting\n");
+ return -ENOMEM;
}
err = mlx5_health_init(dev);
- if (err) {
- dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
- goto close_pci;
- }
+ if (err)
+ goto err_health_init;
err = mlx5_pagealloc_init(dev);
if (err)
goto err_pagealloc_init;
- err = mlx5_load_one(dev, priv, true);
+ return 0;
+
+err_pagealloc_init:
+ mlx5_health_cleanup(dev);
+err_health_init:
+ debugfs_remove(dev->priv.dbg_root);
+
+ return err;
+}
+
+static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+{
+ mlx5_pagealloc_cleanup(dev);
+ mlx5_health_cleanup(dev);
+ debugfs_remove_recursive(dev->priv.dbg_root);
+}
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mlx5_core_dev *dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ if (!devlink) {
+ dev_err(&pdev->dev, "kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ dev = devlink_priv(devlink);
+ dev->device = &pdev->dev;
+ dev->pdev = pdev;
+
+ err = mlx5_mdev_init(dev, prof_sel);
+ if (err)
+ goto mdev_init_err;
+
+ err = mlx5_pci_init(dev, pdev, id);
+ if (err) {
+ mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
+ err);
+ goto pci_init_err;
+ }
+
+ err = mlx5_load_one(dev, true);
if (err) {
- dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+ mlx5_core_err(dev, "mlx5_load_one failed with error code %d\n",
+ err);
goto err_load_one;
}
@@ -1284,14 +1320,13 @@ static int init_one(struct pci_dev *pdev,
return 0;
clean_load:
- mlx5_unload_one(dev, priv, true);
+ mlx5_unload_one(dev, true);
+
err_load_one:
- mlx5_pagealloc_cleanup(dev);
-err_pagealloc_init:
- mlx5_health_cleanup(dev);
-close_pci:
- mlx5_pci_close(dev, priv);
-clean_dev:
+ mlx5_pci_close(dev);
+pci_init_err:
+ mlx5_mdev_uninit(dev);
+mdev_init_err:
devlink_free(devlink);
return err;
@@ -1301,20 +1336,18 @@ static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_priv *priv = &dev->priv;
devlink_unregister(devlink);
mlx5_unregister_device(dev);
- if (mlx5_unload_one(dev, priv, true)) {
- dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
- mlx5_health_cleanup(dev);
+ if (mlx5_unload_one(dev, true)) {
+ mlx5_core_err(dev, "mlx5_unload_one failed\n");
+ mlx5_health_flush(dev);
return;
}
- mlx5_pagealloc_cleanup(dev);
- mlx5_health_cleanup(dev);
- mlx5_pci_close(dev, priv);
+ mlx5_pci_close(dev);
+ mlx5_mdev_uninit(dev);
devlink_free(devlink);
}
@@ -1322,12 +1355,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
- dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_core_info(dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev, false);
- mlx5_unload_one(dev, priv, false);
+ mlx5_unload_one(dev, false);
/* In case of kernel call drain the health wq */
if (state) {
mlx5_drain_health_wq(dev);
@@ -1354,7 +1386,9 @@ static int wait_vital(struct pci_dev *pdev)
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
if (last_count && last_count != count) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ mlx5_core_info(dev,
+ "wait vital counter value 0x%x after %d iterations\n",
+ count, i);
return 0;
}
last_count = count;
@@ -1370,12 +1404,12 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
- dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_core_info(dev, "%s was called\n", __func__);
err = mlx5_pci_enable_device(dev);
if (err) {
- dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
- , __func__, err);
+ mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
+ __func__, err);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -1384,7 +1418,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
if (wait_vital(pdev)) {
- dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -1394,17 +1428,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
int err;
- dev_info(&pdev->dev, "%s was called\n", __func__);
+ mlx5_core_info(dev, "%s was called\n", __func__);
- err = mlx5_load_one(dev, priv, false);
+ err = mlx5_load_one(dev, false);
if (err)
- dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
- , __func__, err);
+ mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
+ __func__, err);
else
- dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+ mlx5_core_info(dev, "%s: device recovered\n", __func__);
}
static const struct pci_error_handlers mlx5_err_handler = {
@@ -1466,13 +1499,12 @@ succeed:
static void shutdown(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
int err;
- dev_info(&pdev->dev, "Shutdown was called\n");
+ mlx5_core_info(dev, "Shutdown was called\n");
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev, priv, false);
+ mlx5_unload_one(dev, false);
mlx5_pci_disable_device(dev);
}
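Note: taken together, the main.c changes split the old monolithic load path into mlx5_mdev_init() (software state: debugfs dir, health, page allocator), mlx5_pci_init() (PCI enable, BARs, DMA caps, initialization segment) and mlx5_load_one() (firmware/function setup plus the load stage). A condensed sketch of the resulting probe order, using only calls visible in this hunk and eliding devlink handling:

/* Condensed sketch of the reworked probe flow; see init_one() above for
 * the complete version including devlink registration.
 */
static int example_probe(struct mlx5_core_dev *dev, struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int err;

	dev->device = &pdev->dev;
	dev->pdev = pdev;

	err = mlx5_mdev_init(dev, prof_sel);	/* sw state only */
	if (err)
		return err;

	err = mlx5_pci_init(dev, pdev, id);	/* enable PCI, map iseg */
	if (err)
		goto err_mdev;

	err = mlx5_load_one(dev, true);		/* function setup + sw objs + load */
	if (err)
		goto err_pci;

	return 0;

err_pci:
	mlx5_pci_close(dev);
err_mdev:
	mlx5_mdev_uninit(dev);
	return err;
}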
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 7b331674622c..22e69d4813e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -41,6 +41,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/driver.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
@@ -48,44 +49,57 @@
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
- dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_once(__dev, format, ...) \
- dev_dbg_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
+#define mlx5_core_dbg_once(__dev, format, ...) \
+ dev_dbg_once((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
-do { \
- if ((mask) & mlx5_core_debug_mask) \
- mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
+#define mlx5_core_dbg_mask(__dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(__dev, format, ...) \
- dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
+#define mlx5_core_err(__dev, format, ...) \
+ dev_err((__dev)->device, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_err_rl(__dev, format, ...) \
- dev_err_ratelimited(&(__dev)->pdev->dev, \
- "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
- ##__VA_ARGS__)
+#define mlx5_core_err_rl(__dev, format, ...) \
+ dev_err_ratelimited((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_warn(__dev, format, ...) \
- dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
- __func__, __LINE__, current->pid, \
- ##__VA_ARGS__)
+#define mlx5_core_warn(__dev, format, ...) \
+ dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
#define mlx5_core_warn_once(__dev, format, ...) \
- dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \
+ dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
-#define mlx5_core_info(__dev, format, ...) \
- dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx5_core_warn_rl(__dev, format, ...) \
+ dev_warn_ratelimited((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
+#define mlx5_core_info(__dev, format, ...) \
+ dev_info((__dev)->device, format, ##__VA_ARGS__)
+
+#define mlx5_core_info_rl(__dev, format, ...) \
+ dev_info_ratelimited((__dev)->device, \
+ "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
enum {
MLX5_CMD_DATA, /* print command payload only */
@@ -111,7 +125,6 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
@@ -176,6 +189,11 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
void mlx5e_init(void);
void mlx5e_cleanup(void);
+static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+ return pci_num_vf(dev->pdev) ? true : false;
+}
+
static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
/* LACP owner conditions:
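Note: the mlx5_core_* logging wrappers above now go through dev->device instead of &pdev->dev, and rate-limited warn/info variants are added. A hypothetical call site (the message text and the cqn parameter are illustrative only):

/* Illustrative only: the _rl variants are throttled via
 * dev_warn_ratelimited()/dev_info_ratelimited() and carry the
 * __func__:__LINE__:(pid) prefix shown in the macros above.
 */
static void example_log(struct mlx5_core_dev *dev, u32 cqn)
{
	mlx5_core_warn_rl(dev, "dropping event on cqn 0x%x\n", cqn);
	mlx5_core_info_rl(dev, "cqn 0x%x recovered\n", cqn);
}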
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 41025387ff2c..91bd258ecf1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -200,7 +200,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
rb_erase(&fwp->rb_node, &dev->priv.page_root);
if (fwp->free_count != 1)
list_del(&fwp->list);
- dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
+ dma_unmap_page(dev->device, addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@@ -211,11 +211,12 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
+ struct device *device = dev->device;
+ int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
u64 addr;
int err;
- int nid = dev_to_node(&dev->pdev->dev);
page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
if (!page) {
@@ -223,9 +224,8 @@ static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
return -ENOMEM;
}
map:
- addr = dma_map_page(&dev->pdev->dev, page, 0,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&dev->pdev->dev, addr)) {
+ addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(device, addr)) {
mlx5_core_warn(dev, "failed dma mapping page\n");
err = -ENOMEM;
goto err_mapping;
@@ -240,8 +240,7 @@ map:
err = insert_page(dev, addr, page, func_id);
if (err) {
mlx5_core_err(dev, "failed to track allocated page\n");
- dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
- DMA_BIDIRECTIONAL);
+ dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
err_mapping:
@@ -249,7 +248,7 @@ err_mapping:
__free_page(page);
if (zero_addr == 0)
- dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+ dma_unmap_page(device, zero_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
return err;
@@ -600,8 +599,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return 0;
}
- mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_pages,
- dev->priv.name);
+ mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
while (*pages) {
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
@@ -614,6 +612,6 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
msleep(50);
}
- mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+ mlx5_core_dbg(dev, "All pages received\n");
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 361468e0435d..cc262b30aed5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -293,15 +293,36 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
return 0;
}
+static int mlx5_eeprom_page(int offset)
+{
+ if (offset < MLX5_EEPROM_PAGE_LENGTH)
+ /* Addresses between 0-255 - page 00 */
+ return 0;
+
+ /* Addresses between 256 - 639 belong to pages 01, 02 and 03
+ * For example, offset = 400 belongs to page 02:
+ * 1 + ((400 - 256)/128) = 2
+ */
+ return 1 + ((offset - MLX5_EEPROM_PAGE_LENGTH) /
+ MLX5_EEPROM_HIGH_PAGE_LENGTH);
+}
+
+static int mlx5_eeprom_high_page_offset(int page_num)
+{
+ if (!page_num) /* Page 0 always start from low page */
+ return 0;
+
+ /* High page */
+ return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
+}
+
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data)
{
+ int module_num, page_num, status, err;
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
u32 in[MLX5_ST_SZ_DW(mcia_reg)];
- int module_num;
u16 i2c_addr;
- int status;
- int err;
void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
err = mlx5_query_module_num(dev, &module_num);
@@ -311,8 +332,15 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
memset(in, 0, sizeof(in));
size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
- if (offset < MLX5_EEPROM_PAGE_LENGTH &&
- offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ /* Get the page number related to the given offset */
+ page_num = mlx5_eeprom_page(offset);
+
+ /* Set the right offset according to the page number,
+ * For page_num > 0, relative offset is always >= 128 (high page).
+ */
+ offset -= mlx5_eeprom_high_page_offset(page_num);
+
+ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
@@ -321,7 +349,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
- MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, page_number, page_num);
MLX5_SET(mcia_reg, in, device_address, offset);
MLX5_SET(mcia_reg, in, size, size);
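Note: the eeprom helpers added above split a flat offset into a module page number and a page-relative address. Assuming MLX5_EEPROM_PAGE_LENGTH is 256 and MLX5_EEPROM_HIGH_PAGE_LENGTH is 128 (the values the in-code example arithmetic implies), the translation can be checked with a standalone snippet:

/* Standalone check of the offset-to-page math; the constants and helper
 * names here are local stand-ins, not the driver's macros.
 */
#include <assert.h>

#define EEPROM_PAGE_LENGTH		256
#define EEPROM_HIGH_PAGE_LENGTH		128

static int eeprom_page(int offset)
{
	if (offset < EEPROM_PAGE_LENGTH)
		return 0;
	return 1 + (offset - EEPROM_PAGE_LENGTH) / EEPROM_HIGH_PAGE_LENGTH;
}

static int eeprom_high_page_offset(int page_num)
{
	return page_num ? page_num * EEPROM_HIGH_PAGE_LENGTH : 0;
}

int main(void)
{
	/* offset 400 falls on page 2 at page-relative address 144 */
	assert(eeprom_page(400) == 2);
	assert(400 - eeprom_high_page_offset(2) == 144);
	/* offsets below 256 stay on page 0 unchanged */
	assert(eeprom_page(100) == 0);
	return 0;
}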
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
new file mode 100644
index 000000000000..86f77456f873
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include <linux/mlx5/vport.h>
+#include <rdma/ib_verbs.h>
+#include <net/addrconf.h>
+
+#include "lib/mlx5.h"
+#include "eswitch.h"
+#include "fs_core.h"
+#include "rdma.h"
+
+static void mlx5_rdma_disable_roce_steering(struct mlx5_core_dev *dev)
+{
+ struct mlx5_core_roce *roce = &dev->priv.roce;
+
+ if (!roce->ft)
+ return;
+
+ mlx5_del_flow_rules(roce->allow_rule);
+ mlx5_destroy_flow_group(roce->fg);
+ mlx5_destroy_flow_table(roce->ft);
+}
+
+static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_roce *roce = &dev->priv.roce;
+ struct mlx5_flow_handle *flow_rule = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ void *match_criteria;
+ u32 *flow_group_in;
+ void *misc;
+ int err;
+
+ if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)))
+ return -EOPNOTSUPP;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ kvfree(flow_group_in);
+ return -ENOMEM;
+ }
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_RDMA_RX);
+ if (!ns) {
+ mlx5_core_err(dev, "Failed to get RDMA RX namespace");
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_err(dev, "Failed to create RDMA RX flow table");
+ err = PTR_ERR(ft);
+ goto free;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ misc_parameters.source_port);
+
+ fg = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(fg)) {
+ err = PTR_ERR(fg);
+ mlx5_core_err(dev, "Failed to create RDMA RX flow group err(%d)\n", err);
+ goto destroy_flow_table;
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port,
+ dev->priv.eswitch->manager_vport);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ mlx5_core_err(dev, "Failed to add RoCE allow rule, err=%d\n",
+ err);
+ goto destroy_flow_group;
+ }
+
+ kvfree(spec);
+ kvfree(flow_group_in);
+ roce->ft = ft;
+ roce->fg = fg;
+ roce->allow_rule = flow_rule;
+
+ return 0;
+
+destroy_flow_table:
+ mlx5_destroy_flow_table(ft);
+destroy_flow_group:
+ mlx5_destroy_flow_group(fg);
+free:
+ kvfree(spec);
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
+{
+ mlx5_core_roce_gid_set(dev, 0, 0, 0,
+ NULL, NULL, false, 0, 0);
+}
+
+static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
+{
+ u8 hw_id[ETH_ALEN];
+
+ mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ addrconf_addr_eui48(&gid->raw[8], hw_id);
+}
+
+static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
+{
+ union ib_gid gid;
+ u8 mac[ETH_ALEN];
+
+ mlx5_rdma_make_default_gid(dev, &gid);
+ return mlx5_core_roce_gid_set(dev, 0,
+ MLX5_ROCE_VERSION_1,
+ 0, gid.raw, mac,
+ false, 0, 1);
+}
+
+void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev)
+{
+ mlx5_rdma_disable_roce_steering(dev);
+ mlx5_rdma_del_roce_addr(dev);
+ mlx5_nic_vport_disable_roce(dev);
+}
+
+void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_nic_vport_enable_roce(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
+ return;
+ }
+
+ err = mlx5_rdma_add_roce_addr(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to add RoCE address: %d\n", err);
+ goto disable_roce;
+ }
+
+ err = mlx5_rdma_enable_roce_steering(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to enable RoCE steering: %d\n", err);
+ goto del_roce_addr;
+ }
+
+ return;
+
+del_roce_addr:
+ mlx5_rdma_del_roce_addr(dev);
+disable_roce:
+ mlx5_nic_vport_disable_roce(dev);
+ return;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.h b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
new file mode 100644
index 000000000000..750cff2a71a4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#ifndef __MLX5_RDMA_H__
+#define __MLX5_RDMA_H__
+
+#include "mlx5_core.h"
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev);
+void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev);
+
+#else /* CONFIG_MLX5_ESWITCH */
+
+static inline void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev) {}
+static inline void mlx5_rdma_disable_roce(struct mlx5_core_dev *dev) {}
+
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_RDMA_H__ */
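Note: because rdma.h supplies empty inline stubs when CONFIG_MLX5_ESWITCH is not set, callers can invoke the RoCE helpers unconditionally. A hypothetical caller, purely as a sketch:

/* Hypothetical caller: no #ifdef CONFIG_MLX5_ESWITCH needed, the stubs
 * above compile to no-ops when eswitch support is not built.
 */
#include "rdma.h"

static void example_start_offloads(struct mlx5_core_dev *dev)
{
	mlx5_rdma_enable_roce(dev);
}

static void example_stop_offloads(struct mlx5_core_dev *dev)
{
	mlx5_rdma_disable_roce(dev);
}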
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b23fa8d2d60..a249b3c3843d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -36,13 +36,6 @@
#include "mlx5_core.h"
#include "eswitch.h"
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
- return !!sriov->num_vfs;
-}
-
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -151,33 +144,10 @@ out:
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
-static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
-{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err = 0;
-
- if (pci_num_vf(pdev)) {
- mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
- return -EBUSY;
- }
-
- err = pci_enable_sriov(pdev, num_vfs);
- if (err)
- mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-
- return err;
-}
-
-static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
-{
- pci_disable_sriov(pdev);
-}
-
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err = 0;
+ int err;
err = mlx5_device_enable_sriov(dev, num_vfs);
if (err) {
@@ -185,42 +155,37 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
- err = mlx5_pci_enable_sriov(pdev, num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+ mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
mlx5_device_disable_sriov(dev);
- return err;
}
-
- sriov->num_vfs = num_vfs;
-
- return 0;
+ return err;
}
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- mlx5_pci_disable_sriov(pdev);
+ pci_disable_sriov(pdev);
mlx5_device_disable_sriov(dev);
- sriov->num_vfs = 0;
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err = 0;
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
- if (!mlx5_core_is_pf(dev))
- return -EPERM;
if (num_vfs)
err = mlx5_sriov_enable(pdev, num_vfs);
else
mlx5_sriov_disable(pdev);
+ if (!err)
+ sriov->num_vfs = num_vfs;
return err ? err : num_vfs;
}
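Note: with the thin mlx5_pci_enable_sriov()/mlx5_pci_disable_sriov() wrappers removed, mlx5_core_sriov_configure() is now the only place that updates sriov->num_vfs, and only when the enable or disable step succeeded. A hypothetical caller of the configure entry point, for illustration:

/* Hypothetical wrapper: mlx5_core_sriov_configure() returns the VF count
 * on success and a negative errno on failure.
 */
static int example_set_num_vfs(struct pci_dev *pdev, int num_vfs)
{
	int ret = mlx5_core_sriov_configure(pdev, num_vfs);

	return ret < 0 ? ret : 0;
}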
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index c4d4b76096dc..b1068500f1df 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -182,16 +182,24 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_query_sq_state);
+int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
+ u32 *in, int inlen,
+ u32 *out, int outlen)
+{
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ return mlx5_cmd_exec(dev, in, inlen, out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_create_tir_out);
+
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn)
{
- u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
int err;
- MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
- memset(out, 0, sizeof(out));
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ err = mlx5_core_create_tir_out(dev, in, inlen,
+ out, sizeof(out));
if (!err)
*tirn = MLX5_GET(create_tir_out, out, tirn);
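Note: mlx5_core_create_tir_out() exposes the raw CREATE_TIR command so callers can inspect the full output mailbox rather than only the TIR number. A sketch of such a caller, mirroring what mlx5_core_create_tir() itself now does with the new helper:

/* Sketch: 'in' must be a fully built create_tir_in mailbox of inlen bytes. */
static int example_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
			      u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
	int err;

	err = mlx5_core_create_tir_out(dev, in, inlen, out, sizeof(out));
	if (err)
		return err;

	*tirn = MLX5_GET(create_tir_out, out, tirn);
	return 0;
}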
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 94464723ff77..0d006224d7b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -79,7 +79,7 @@ static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
else
system_page_index = index;
- return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+ return (mdev->bar_addr >> PAGE_SHIFT) + system_page_index;
}
static void up_rel_func(struct kref *kref)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ef95feca9961..95cdc8cbcba4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -371,67 +371,6 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u16 vport,
- u16 vlans[],
- int *size)
-{
- u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
- void *nic_vport_ctx;
- int req_list_size;
- int max_list_size;
- int out_sz;
- void *out;
- int err;
- int i;
-
- req_list_size = *size;
- max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
- if (req_list_size > max_list_size) {
- mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
- req_list_size, max_list_size);
- req_list_size = max_list_size;
- }
-
- out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
- req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
-
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
-
- MLX5_SET(query_nic_vport_context_in, in, opcode,
- MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
- MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
- MLX5_NVPRT_LIST_TYPE_VLAN);
- MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
- if (vport)
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
-
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
- if (err)
- goto out;
-
- nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
- nic_vport_context);
- req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
- allowed_list_size);
-
- *size = req_list_size;
- for (i = 0; i < req_list_size; i++) {
- void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
- nic_vport_ctx,
- current_uc_mac_address[i]);
- vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
- }
-out:
- kfree(out);
- return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
-
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index ea934a48c90a..1f87cce421e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -134,6 +134,11 @@ static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq)
*wq->db = cpu_to_be32(wq->wqe_ctr);
}
+static inline u16 mlx5_wq_cyc_get_ctr_wrap_cnt(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr >> wq->fbc.log_sz;
+}
+
static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
{
return ctr & wq->fbc.sz_m1;
@@ -243,6 +248,13 @@ static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u16 mlx5_wq_ll_get_wqe_next_ix(struct mlx5_wq_ll *wq, u16 ix)
+{
+ struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, ix);
+
+ return be16_to_cpu(wqe->next_wqe_index);
+}
+
static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
{
wq->head = head_next;
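Note: the two inline helpers added to wq.h expose how many times a counter has wrapped a cyclic WQ and the next_wqe_index of a linked-list WQ entry. A hypothetical consumer of both, with names chosen only for illustration:

/* Hypothetical users of the new wq.h helpers. */
static bool example_same_wrap(struct mlx5_wq_cyc *wq, u16 a, u16 b)
{
	/* two counters point into the same ring generation only if they
	 * have wrapped the cyclic WQ the same number of times
	 */
	return mlx5_wq_cyc_get_ctr_wrap_cnt(wq, a) ==
	       mlx5_wq_cyc_get_ctr_wrap_cnt(wq, b);
}

static u16 example_next_in_chain(struct mlx5_wq_ll *wq, u16 ix)
{
	/* follow one link of a linked-list WQ without open-coding the
	 * mlx5_wqe_srq_next_seg access
	 */
	return mlx5_wq_ll_get_wqe_next_ix(wq, ix);
}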
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 9c195dfed031..b6b3ff0fe17f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -4,6 +4,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
+ select NET_DEVLINK
---help---
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index a01d15546e37..c4dc72e1ce63 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -28,8 +28,8 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum1_mr_tcam.o spectrum2_mr_tcam.o \
spectrum_mr_tcam.o spectrum_mr.o \
spectrum_qdisc.o spectrum_span.o \
- spectrum_nve.o spectrum_nve_vxlan.o
+ spectrum_nve.o spectrum_nve_vxlan.o \
+ spectrum_dpipe.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
-mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
mlxsw_minimal-objs := minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f26a4ca29363..bcbe07ec22be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -781,7 +781,8 @@ mlxsw_devlink_sb_pool_get(struct devlink *devlink,
static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type)
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
@@ -789,7 +790,8 @@ mlxsw_devlink_sb_pool_set(struct devlink *devlink,
if (!mlxsw_driver->sb_pool_set)
return -EOPNOTSUPP;
return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
- pool_index, size, threshold_type);
+ pool_index, size, threshold_type,
+ extack);
}
static void *__dl_port(struct devlink_port *devlink_port)
@@ -829,7 +831,8 @@ static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold)
+ u32 threshold,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
@@ -839,7 +842,7 @@ static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
!mlxsw_core_port_check(mlxsw_core_port))
return -EOPNOTSUPP;
return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
- pool_index, threshold);
+ pool_index, threshold, extack);
}
static int
@@ -864,7 +867,8 @@ static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold)
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
@@ -875,7 +879,7 @@ mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
return -EOPNOTSUPP;
return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
tc_index, pool_type,
- pool_index, threshold);
+ pool_index, threshold, extack);
}
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
@@ -934,6 +938,46 @@ mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
pool_type, p_cur, p_max);
}
+static int
+mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ char fw_info_psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
+ u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
+ char mgir_pl[MLXSW_REG_MGIR_LEN];
+ char buf[32];
+ int err;
+
+ err = devlink_info_driver_name_put(req,
+ mlxsw_core->bus_info->device_kind);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, fw_info_psid, &fw_major,
+ &fw_minor, &fw_sub_minor);
+
+ sprintf(buf, "%X", hw_rev);
+ err = devlink_info_version_fixed_put(req, "hw.revision", buf);
+ if (err)
+ return err;
+
+ err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
+ if (err)
+ return err;
+
+ sprintf(buf, "%d.%d.%d", fw_major, fw_minor, fw_sub_minor);
+ err = devlink_info_version_running_put(req, "fw.version", buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
@@ -968,6 +1012,7 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
+ .info_get = mlxsw_devlink_info_get,
};
static int
@@ -1718,7 +1763,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+ u32 port_number, bool split,
+ u32 split_port_subnumber,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct mlxsw_core_port *mlxsw_core_port =
@@ -1727,6 +1776,9 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
int err;
mlxsw_core_port->local_port = local_port;
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber,
+ switch_id, switch_id_len);
err = devlink_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -1746,17 +1798,13 @@ void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- void *port_driver_priv, struct net_device *dev,
- u32 port_number, bool split,
- u32 split_port_subnumber)
+ void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_number, split, split_port_subnumber);
devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
@@ -1796,16 +1844,18 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
-int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
- u8 local_port, char *name, size_t len)
+
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
- return devlink_port_get_phys_port_name(devlink_port, name, len);
+ return devlink_port;
}
-EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
+EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
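The core.c hunks above do two things: they wire the new MGIR firmware/hardware query into devlink's .info_get callback, and they move devlink_port_attrs_set() ahead of devlink_port_register() inside mlxsw_core_port_init(), so the port attributes (flavour, number, split info, switch ID) are already in place when the port becomes visible. For readers unfamiliar with the .info_get contract, a minimal sketch of the same callback shape follows; it is not part of the patch, the foo_* names are hypothetical, and only the devlink helpers and the callback signature are taken from the code above.

#include <linux/kernel.h>
#include <net/devlink.h>

/* Hypothetical driver state; everything named foo_* is illustrative. */
struct foo_priv {
        char hw_rev[16];
        u32 fw_major, fw_minor, fw_sub_minor;
};

static int foo_devlink_info_get(struct devlink *devlink,
                                struct devlink_info_req *req,
                                struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = devlink_priv(devlink);
        char buf[32];
        int err;

        err = devlink_info_driver_name_put(req, "foo");
        if (err)
                return err;

        /* Attributes that cannot change at runtime are reported as "fixed". */
        err = devlink_info_version_fixed_put(req, "hw.revision", priv->hw_rev);
        if (err)
                return err;

        /* The firmware image currently executing is reported as "running". */
        snprintf(buf, sizeof(buf), "%u.%u.%u",
                 priv->fw_major, priv->fw_minor, priv->fw_sub_minor);
        return devlink_info_version_running_put(req, "fw.version", buf);
}

static const struct devlink_ops foo_devlink_ops = {
        .info_get = foo_devlink_info_get,
};

The reported strings then surface in userspace through "devlink dev info".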
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8ec53f027575..917be621c904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -164,20 +164,23 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+ u32 port_number, bool split,
+ u32 split_port_subnumber,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- void *port_driver_priv, struct net_device *dev,
- u32 port_number, bool split,
- u32 split_port_subnumber);
+ void *port_driver_priv, struct net_device *dev);
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
-int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
- u8 local_port, char *name, size_t len);
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
@@ -251,13 +254,14 @@ struct mlxsw_driver {
struct devlink_sb_pool_info *pool_info);
int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type);
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold);
+ u32 threshold, struct netlink_ext_ack *extack);
int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
@@ -265,7 +269,8 @@ struct mlxsw_driver {
int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold);
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 00c390024350..cf2114273b72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -51,33 +51,20 @@ static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
return 0;
}
-static int
-mlxsw_m_port_get_phys_port_name(struct net_device *dev, char *name, size_t len)
-{
- struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
- struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- u8 local_port = mlxsw_m_port->local_port;
-
- return mlxsw_core_port_get_phys_port_name(core, local_port, name, len);
-}
-
-static int mlxsw_m_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_m_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- ppid->id_len = sizeof(mlxsw_m->base_mac);
- memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_m->core,
+ mlxsw_m_port->local_port);
}
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
.ndo_open = mlxsw_m_port_dummy_open_stop,
.ndo_stop = mlxsw_m_port_dummy_open_stop,
- .ndo_get_phys_port_name = mlxsw_m_port_get_phys_port_name,
- .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
static int mlxsw_m_get_module_info(struct net_device *netdev,
@@ -150,7 +137,10 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ module + 1, false, 0,
+ mlxsw_m->base_mac,
+ sizeof(mlxsw_m->base_mac));
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -190,7 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
}
mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port,
- mlxsw_m_port, dev, module + 1, false, 0);
+ mlxsw_m_port, dev);
return 0;
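The minimal.c hunk above is the clearest illustration of the new ndo_get_devlink_port contract: instead of hand-rolling .ndo_get_phys_port_name and .ndo_get_port_parent_id, the driver simply returns its devlink_port and lets the core answer both queries from the attributes set at port-init time. A stripped-down sketch of that pattern follows; the foo_* names are hypothetical, only the ndo and the devlink types are taken from the code above.

#include <linux/netdevice.h>
#include <net/devlink.h>

/* Hypothetical per-netdev private data embedding the devlink_port that
 * was registered with devlink_port_register() when the port was created.
 */
struct foo_port {
        struct devlink_port dl_port;
};

static struct devlink_port *foo_get_devlink_port(struct net_device *dev)
{
        struct foo_port *foo_port = netdev_priv(dev);

        return &foo_port->dl_port;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_get_devlink_port   = foo_get_devlink_port,
};

With this in place the stack derives phys_port_name ("p<port>", or "p<port>s<sub>" for split ports) and the parent switch ID from the devlink port attributes, which is why the explicit helpers could be deleted from both minimal.c and spectrum.c.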
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index eb4c5e8964cd..e8002bfc1e8f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5210,6 +5210,42 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
mlxsw_reg_pspa_sub_port_set(payload, 0);
}
+/* PPLR - Port Physical Loopback Register
+ * --------------------------------------
+ * This register allows configuration of the port's loopback mode.
+ */
+#define MLXSW_REG_PPLR_ID 0x5018
+#define MLXSW_REG_PPLR_LEN 0x8
+
+MLXSW_REG_DEFINE(pplr, MLXSW_REG_PPLR_ID, MLXSW_REG_PPLR_LEN);
+
+/* reg_pplr_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pplr, local_port, 0x00, 16, 8);
+
+/* Phy local loopback. When set the port's egress traffic is looped back
+ * to the receiver and the port transmitter is disabled.
+ */
+#define MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL BIT(1)
+
+/* reg_pplr_lb_en
+ * Loopback enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pplr, lb_en, 0x04, 0, 8);
+
+static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
+ bool phy_local)
+{
+ MLXSW_REG_ZERO(pplr, payload);
+ mlxsw_reg_pplr_local_port_set(payload, local_port);
+ mlxsw_reg_pplr_lb_en_set(payload,
+ phy_local ?
+ MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
+}
+
/* HTGT - Host Trap Group Table
* ----------------------------
* Configures the properties for forwarding to CPU.
@@ -8534,6 +8570,60 @@ static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
mlxsw_reg_mpar_pa_id_set(payload, pa_id);
}
+/* MGIR - Management General Information Register
+ * ----------------------------------------------
+ * MGIR register allows software to query the hardware and firmware general
+ * information.
+ */
+#define MLXSW_REG_MGIR_ID 0x9020
+#define MLXSW_REG_MGIR_LEN 0x9C
+
+MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN);
+
+/* reg_mgir_hw_info_device_hw_revision
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16);
+
+#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16
+
+/* reg_mgir_fw_info_psid
+ * PSID (ASCII string).
+ * Access: RO
+ */
+MLXSW_ITEM_BUF(reg, mgir, fw_info_psid, 0x30, MLXSW_REG_MGIR_FW_INFO_PSID_SIZE);
+
+/* reg_mgir_fw_info_extended_major
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_major, 0x44, 0, 32);
+
+/* reg_mgir_fw_info_extended_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_minor, 0x48, 0, 32);
+
+/* reg_mgir_fw_info_extended_sub_minor
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_extended_sub_minor, 0x4C, 0, 32);
+
+static inline void mlxsw_reg_mgir_pack(char *payload)
+{
+ MLXSW_REG_ZERO(mgir, payload);
+}
+
+static inline void
+mlxsw_reg_mgir_unpack(char *payload, u32 *hw_rev, char *fw_info_psid,
+ u32 *fw_major, u32 *fw_minor, u32 *fw_sub_minor)
+{
+ *hw_rev = mlxsw_reg_mgir_hw_info_device_hw_revision_get(payload);
+ mlxsw_reg_mgir_fw_info_psid_memcpy_from(payload, fw_info_psid);
+ *fw_major = mlxsw_reg_mgir_fw_info_extended_major_get(payload);
+ *fw_minor = mlxsw_reg_mgir_fw_info_extended_minor_get(payload);
+ *fw_sub_minor = mlxsw_reg_mgir_fw_info_extended_sub_minor_get(payload);
+}
+
/* MRSR - Management Reset and Shutdown Register
* ---------------------------------------------
* MRSR register is used to reset or shutdown the switch or
@@ -9927,6 +10017,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
+ MLXSW_REG(pplr),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
@@ -9958,6 +10049,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mcia),
MLXSW_REG(mpat),
MLXSW_REG(mpar),
+ MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
MLXSW_REG(mpsc),
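A note on the reg.h additions above: MLXSW_REG_DEFINE() emits the mlxsw_reg_info entry that the MLXSW_REG() macro and the mlxsw_reg_infos[] table refer to, while each MLXSW_ITEM32()/MLXSW_ITEM_BUF() line generates typed accessors (mlxsw_reg_mgir_..._get(), mlxsw_reg_pplr_..._set(), and so on) that the pack/unpack helpers use. The MGIR query round-trip, as wired up in core.c earlier in this patch, then boils down to the sketch below; foo_query_fw_rev() is a hypothetical caller and the quoted includes are the driver-local headers.

#include <linux/printk.h>

#include "core.h"
#include "reg.h"

static int foo_query_fw_rev(struct mlxsw_core *mlxsw_core)
{
        char mgir_pl[MLXSW_REG_MGIR_LEN];
        char psid[MLXSW_REG_MGIR_FW_INFO_PSID_SIZE];
        u32 hw_rev, fw_major, fw_minor, fw_sub_minor;
        int err;

        mlxsw_reg_mgir_pack(mgir_pl);           /* just zeroes the payload */
        err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
        if (err)
                return err;

        mlxsw_reg_mgir_unpack(mgir_pl, &hw_rev, psid, &fw_major, &fw_minor,
                              &fw_sub_minor);
        /* The PSID buffer is not necessarily NUL-terminated, hence %.16s. */
        pr_info("hw rev %X, psid %.16s, fw %u.%u.%u\n",
                hw_rev, psid, fw_major, fw_minor, fw_sub_minor);
        return 0;
}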
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index 773ef7fdb285..33a9fc9ef6a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -24,6 +24,8 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
+ MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
MLXSW_RES_ID_MAX_BUFFER_SIZE,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -78,6 +80,8 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
+ [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
[MLXSW_RES_ID_MAX_BUFFER_SIZE] = 0x2802, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 6b8aa3761899..dbb425717f5e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -21,7 +21,7 @@
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
#include <linux/netlink.h>
-#include <linux/random.h>
+#include <linux/jhash.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -46,8 +46,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1910
-#define MLXSW_SP1_FWREV_SUBMINOR 622
+#define MLXSW_SP1_FWREV_MINOR 2000
+#define MLXSW_SP1_FWREV_SUBMINOR 1122
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -1254,16 +1254,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
- size_t len)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
- return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
- mlxsw_sp_port->local_port,
- name, len);
-}
-
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
unsigned long cookie) {
@@ -1279,21 +1269,19 @@ mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
static int
mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
- const struct tc_action *a,
+ const struct flow_action_entry *act,
bool ingress)
{
enum mlxsw_sp_span_type span_type;
- struct net_device *to_dev;
- to_dev = tcf_mirred_dev(a);
- if (!to_dev) {
+ if (!act->dev) {
netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
return -EINVAL;
}
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, act->dev, span_type,
true, &mirror->span_id);
}
@@ -1312,7 +1300,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_cls_matchall_offload *cls,
- const struct tc_action *a,
+ const struct flow_action_entry *act,
bool ingress)
{
int err;
@@ -1323,18 +1311,18 @@ mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
netdev_err(mlxsw_sp_port->dev, "sample already active\n");
return -EEXIST;
}
- if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+ if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
return -EOPNOTSUPP;
}
rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
- tcf_sample_psample_group(a));
- mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
- mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
- mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+ act->sample.psample_group);
+ mlxsw_sp_port->sample->truncate = act->sample.truncate;
+ mlxsw_sp_port->sample->trunc_size = act->sample.trunc_size;
+ mlxsw_sp_port->sample->rate = act->sample.rate;
- err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+ err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, act->sample.rate);
if (err)
goto err_port_sample_set;
return 0;
@@ -1360,10 +1348,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
__be16 protocol = f->common.protocol;
- const struct tc_action *a;
+ struct flow_action_entry *act;
int err;
- if (!tcf_exts_has_one_action(f->exts)) {
+ if (!flow_offload_has_one_action(&f->rule->action)) {
netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
return -EOPNOTSUPP;
}
@@ -1373,19 +1361,21 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mall_tc_entry->cookie = f->cookie;
- a = tcf_exts_first_action(f->exts);
+ act = &f->rule->action.entries[0];
- if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+ if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
mirror = &mall_tc_entry->mirror;
err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
- mirror, a, ingress);
- } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+ mirror, act,
+ ingress);
+ } else if (act->id == FLOW_ACTION_SAMPLE &&
+ protocol == htons(ETH_P_ALL)) {
mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
- a, ingress);
+ act, ingress);
} else {
err = -EOPNOTSUPP;
}
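The hunk above is part of the tree-wide move away from poking at struct tc_action through tcf_mirred_dev()/tcf_sample_rate() and friends: the matchall rule now arrives with an array of flow_action_entry items and the driver dispatches on act->id. A condensed sketch of that driver-side pattern is shown below; foo_setup_mirror()/foo_setup_sample() are hypothetical consumers, while the flow_offload types and helpers are the ones used in the patch itself.

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

/* Hypothetical consumers of the decoded offload request. */
static int foo_setup_mirror(struct net_device *to_dev)
{
        return 0;
}

static int foo_setup_sample(const struct flow_action_entry *act)
{
        /* act->sample.{psample_group,rate,truncate,trunc_size} */
        return 0;
}

static int foo_add_matchall(struct tc_cls_matchall_offload *f)
{
        struct flow_action_entry *act;

        /* Single-action rules only, as in the mlxsw code above. */
        if (!flow_offload_has_one_action(&f->rule->action))
                return -EOPNOTSUPP;

        act = &f->rule->action.entries[0];
        switch (act->id) {
        case FLOW_ACTION_MIRRED:
                return foo_setup_mirror(act->dev);      /* mirror target */
        case FLOW_ACTION_SAMPLE:
                return foo_setup_sample(act);
        default:
                return -EOPNOTSUPP;
        }
}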
@@ -1679,6 +1669,25 @@ static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
return 0;
}
+static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ char pplr_pl[MLXSW_REG_PPLR_LEN];
+ int err;
+
+ if (netif_running(dev))
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+
+ mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
+ pplr_pl);
+
+ if (netif_running(dev))
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+
+ return err;
+}
+
typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
static int mlxsw_sp_handle_feature(struct net_device *dev,
@@ -1710,20 +1719,30 @@ static int mlxsw_sp_handle_feature(struct net_device *dev,
static int mlxsw_sp_set_features(struct net_device *dev,
netdev_features_t features)
{
- return mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
+ netdev_features_t oper_features = dev->features;
+ int err = 0;
+
+ err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
mlxsw_sp_feature_hw_tc);
+ err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
+ mlxsw_sp_feature_loopback);
+
+ if (err) {
+ dev->features = oper_features;
+ return -EINVAL;
+ }
+
+ return 0;
}
-static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- ppid->id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ mlxsw_sp_port->local_port);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1739,9 +1758,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
- .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3391,7 +3409,10 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ module + 1, split, lane / width,
+ mlxsw_sp->base_mac,
+ sizeof(mlxsw_sp->base_mac));
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -3462,7 +3483,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- dev->hw_features |= NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
dev->min_mtu = 0;
dev->max_mtu = ETH_MAX_MTU;
@@ -3573,8 +3594,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
- mlxsw_sp_port, dev, module + 1,
- mlxsw_sp_port->split, lane / width);
+ mlxsw_sp_port, dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
@@ -3710,14 +3730,14 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
}
static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
- u8 module, unsigned int count)
+ u8 module, unsigned int count, u8 offset)
{
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
int err, i;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
+ true, module, width, i * width);
if (err)
goto err_port_create;
}
@@ -3726,8 +3746,8 @@ static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
err_port_create:
for (i--; i >= 0; i--)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
return err;
}
@@ -3758,11 +3778,19 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
u8 module, cur_width, base_port;
int i;
int err;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
+ return -EIO;
+
+ local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
+ local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
+
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -3788,13 +3816,15 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
/* Make sure we have enough slave (even) ports for the split. */
if (count == 2) {
+ offset = local_ports_in_2x;
base_port = local_port;
- if (mlxsw_sp->ports[base_port + 1]) {
+ if (mlxsw_sp->ports[base_port + local_ports_in_2x]) {
netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
return -EINVAL;
}
} else {
+ offset = local_ports_in_1x;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
if (mlxsw_sp->ports[base_port + 1] ||
mlxsw_sp->ports[base_port + 3]) {
@@ -3805,10 +3835,11 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
}
for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count,
+ offset);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -3825,11 +3856,19 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 local_ports_in_1x, local_ports_in_2x, offset;
struct mlxsw_sp_port *mlxsw_sp_port;
u8 cur_width, base_port;
unsigned int count;
int i;
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_1X) ||
+ !MLXSW_CORE_RES_VALID(mlxsw_core, LOCAL_PORTS_IN_2X))
+ return -EIO;
+
+ local_ports_in_1x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_1X);
+ local_ports_in_2x = MLXSW_CORE_RES_GET(mlxsw_core, LOCAL_PORTS_IN_2X);
+
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
@@ -3847,6 +3886,11 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
+ if (count == 2)
+ offset = local_ports_in_2x;
+ else
+ offset = local_ports_in_1x;
+
base_port = mlxsw_sp_cluster_base_port_get(local_port);
/* Determine which ports to remove. */
@@ -3854,8 +3898,8 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
base_port = base_port + 2;
for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
@@ -4238,7 +4282,7 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
u32 seed;
int err;
- get_random_bytes(&seed, sizeof(seed));
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
MLXSW_REG_SLCR_LAG_HASH_DMAC |
MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
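Besides the matchall conversion, the spectrum.c changes above expose PHY local loopback as a NETIF_F_LOOPBACK netdev feature (toggled through the normal features path, e.g. "ethtool -K <dev> loopback on"), seed the LAG hash from the switch base MAC via jhash() so the seed is stable rather than random per boot, and make port split honour the new LOCAL_PORTS_IN_1X/2X resources instead of assuming consecutive local port numbers. The indexing change amounts to the sketch below; the resource values come from firmware, and the numbers in the comment are purely illustrative.

#include <linux/types.h>

/* Hypothetical example: with LOCAL_PORTS_IN_1X == 2, a 4-way split of
 * base_port 5 creates local ports 5, 7, 9 and 11 (base_port + i * offset)
 * instead of 5, 6, 7 and 8 as before.
 */
static u8 foo_split_local_port(u8 base_port, unsigned int count, int i,
                               u8 local_ports_in_1x, u8 local_ports_in_2x)
{
        u8 offset = count == 2 ? local_ports_in_2x : local_ports_in_1x;

        return base_port + i * offset;
}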
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index da6278b0caa4..8601b3041acd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -371,13 +371,14 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
struct devlink_sb_pool_info *pool_info);
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type);
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold);
+ u32 threshold, struct netlink_ext_ack *extack);
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
@@ -385,7 +386,8 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold);
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index);
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 8811f6513e36..e993159e8e4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -216,7 +216,6 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- bool failed_rollback; /* Indicates failed rollback during migration */
unsigned int ref_count;
};
@@ -1256,11 +1255,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
- if (IS_ERR(new_chunk)) {
- if (ctx->this_is_rollback)
- vchunk->vregion->failed_rollback = true;
+ if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
- }
vchunk->chunk2 = vchunk->chunk;
vchunk->chunk = new_chunk;
ctx->current_vchunk = vchunk;
@@ -1318,8 +1314,13 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
vchunk->chunk, credits);
if (err) {
- if (ctx->this_is_rollback)
+ if (ctx->this_is_rollback) {
+ /* Save the ventry which we ended with and try
+ * to continue later on.
+ */
+ ctx->start_ventry = ventry;
return err;
+ }
/* Swap the chunk and chunk2 pointers so the follow-up
* rollback call will see the original chunk pointer
* in vchunk->chunk.
@@ -1397,8 +1398,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
- if (err2)
- vregion->failed_rollback = true;
+ if (err2) {
+ trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
+ vregion);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
+ /* Let the rollback to be continued later on. */
+ }
}
mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
@@ -1423,8 +1428,6 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
int err;
trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
- if (vregion->failed_rollback)
- return -EBUSY;
hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
if (IS_ERR(hints_priv))
@@ -1471,11 +1474,9 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- if (!vregion->failed_rollback) {
- vregion->region2 = NULL;
- mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
- mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
- }
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
ops->region_rehash_hints_put(ctx->hints_priv);
ctx->hints_priv = NULL;
}
@@ -1506,11 +1507,6 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
ctx, credits);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
- if (vregion->failed_rollback) {
- trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
- vregion);
- dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
- }
}
if (*credits >= 0)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d633bef5f105..8512dd49e420 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -6,6 +6,7 @@
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>
+#include <linux/netlink.h>
#include "spectrum.h"
#include "core.h"
@@ -15,6 +16,8 @@
struct mlxsw_sp_sb_pr {
enum mlxsw_reg_sbpr_mode mode;
u32 size;
+ u8 freeze_mode:1,
+ freeze_size:1;
};
struct mlxsw_cp_sb_occ {
@@ -27,6 +30,8 @@ struct mlxsw_sp_sb_cm {
u32 max_buff;
u16 pool_index;
struct mlxsw_cp_sb_occ occ;
+ u8 freeze_pool:1,
+ freeze_thresh:1;
};
#define MLXSW_SP_SB_INFI -1U
@@ -48,7 +53,12 @@ struct mlxsw_sp_sb_pool_des {
u8 pool;
};
-/* Order ingress pools before egress pools. */
+#define MLXSW_SP_SB_POOL_ING 0
+#define MLXSW_SP_SB_POOL_EGR 4
+#define MLXSW_SP_SB_POOL_EGR_MC 8
+#define MLXSW_SP_SB_POOL_ING_CPU 9
+#define MLXSW_SP_SB_POOL_EGR_CPU 10
+
static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_INGRESS, 0},
{MLXSW_REG_SBXX_DIR_INGRESS, 1},
@@ -59,6 +69,8 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 15},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 4},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
@@ -71,6 +83,8 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
{MLXSW_REG_SBXX_DIR_EGRESS, 15},
+ {MLXSW_REG_SBXX_DIR_INGRESS, 4},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
#define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -94,6 +108,7 @@ struct mlxsw_sp_sb_vals {
unsigned int pool_count;
const struct mlxsw_sp_sb_pool_des *pool_dess;
const struct mlxsw_sp_sb_pm *pms;
+ const struct mlxsw_sp_sb_pm *pms_cpu;
const struct mlxsw_sp_sb_pr *prs;
const struct mlxsw_sp_sb_mm *mms;
const struct mlxsw_sp_sb_cm *cms_ingress;
@@ -275,7 +290,7 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
const u32 pbs[] = {
[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
- [9] = 2 * MLXSW_PORT_MAX_MTU,
+ [9] = MLXSW_PORT_MAX_MTU,
};
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
@@ -390,46 +405,60 @@ static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
.size = _size, \
}
+#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
+ .freeze_mode = _freeze_mode, \
+ .freeze_size = _freeze_size, \
+ }
+
#define MLXSW_SP1_SB_PR_INGRESS_SIZE 12440000
-#define MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP1_SB_PR_EGRESS_SIZE 13232000
+#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)
+/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP1_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_INGRESS_MNG_SIZE),
- /* Egress pools. */
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP1_SB_PR_EGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_EGRESS_SIZE, true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
+ true, true),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
#define MLXSW_SP2_SB_PR_INGRESS_SIZE 40960000
-#define MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP2_SB_PR_EGRESS_SIZE 40960000
+#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
+/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
MLXSW_SP2_SB_PR_INGRESS_SIZE),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_INGRESS_MNG_SIZE),
- /* Egress pools. */
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
- MLXSW_SP2_SB_PR_EGRESS_SIZE),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_EGRESS_SIZE, true, false),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
- MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
+ true, true),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
+ MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -464,83 +493,106 @@ static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
.pool_index = _pool, \
}
+#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_ING, \
+ }
+
+#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR, \
+ }
+
+#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR_MC, \
+ .freeze_pool = true, \
+ .freeze_thresh = true, \
+ }
+
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
- MLXSW_SP_SB_CM(10000, 8, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
- MLXSW_SP_SB_CM(20000, 1, 3),
+ MLXSW_SP_SB_CM_ING(10000, 8),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
- MLXSW_SP_SB_CM(0, 7, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
- MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
- MLXSW_SP_SB_CM(20000, 1, 3),
+ MLXSW_SP_SB_CM_ING(0, 7),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(1500, 9, 4),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(1, 0xff, 4),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR(1500, 9),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR(1, 0xff),
};
static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
- MLXSW_SP_SB_CM(1, 0xff, 4),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR(0, 7),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
+ MLXSW_SP_SB_CM_EGR(1, 0xff),
};
-#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 4)
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 4),
+ MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -648,80 +700,116 @@ static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
.max_buff = _max_buff, \
}
+/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
- MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
- /* Egress pools. */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
MLXSW_SP_SB_PM(10000, 90000),
+ MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
+/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
- /* Ingress pools. */
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
- MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
- /* Egress pools. */
+ MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 7),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(10000, 90000),
+ MLXSW_SP_SB_PM(0, 8), /* 50% occupancy */
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+/* Order according to mlxsw_sp*_sb_pool_dess */
+static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, 90000),
+ MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+};
+
+static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_sb_pm *pms,
+ bool skip_ingress)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- int i;
- int err;
+ int i, err;
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
- const struct mlxsw_sp_sb_pm *pm = &mlxsw_sp->sb_vals->pms[i];
+ const struct mlxsw_sp_sb_pm *pm = &pms[i];
+ const struct mlxsw_sp_sb_pool_des *des;
u32 max_buff;
u32 min_buff;
+ des = &mlxsw_sp->sb_vals->pool_dess[i];
+ if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue;
+
min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
max_buff = pm->max_buff;
if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
- err = mlxsw_sp_sb_pm_write(mlxsw_sp, mlxsw_sp_port->local_port,
- i, min_buff, max_buff);
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
+ max_buff);
if (err)
return err;
}
return 0;
}
-#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
+ mlxsw_sp->sb_vals->pms, false);
+}
+
+static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
+{
+ return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
+ true);
+}
+
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff) \
{ \
.min_buff = _min_buff, \
.max_buff = _max_buff, \
- .pool_index = _pool, \
+ .pool_index = MLXSW_SP_SB_POOL_EGR, \
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
- MLXSW_SP_SB_MM(0, 6, 4),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
+ MLXSW_SP_SB_MM(0, 6),
};
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
@@ -755,21 +843,22 @@ static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
{
int i;
- for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i)
+ for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
- MLXSW_REG_SBXX_DIR_EGRESS)
- goto out;
- WARN(1, "No egress pools\n");
+ MLXSW_REG_SBXX_DIR_INGRESS)
+ (*p_ingress_len)++;
+ else
+ (*p_egress_len)++;
+ }
-out:
- *p_ingress_len = i;
- *p_egress_len = mlxsw_sp->sb_vals->pool_count - i;
+ WARN(*p_egress_len == 0, "No egress pools\n");
}
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
.pool_dess = mlxsw_sp1_sb_pool_dess,
.pms = mlxsw_sp1_sb_pms,
+ .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
.prs = mlxsw_sp1_sb_prs,
.mms = mlxsw_sp_sb_mms,
.cms_ingress = mlxsw_sp1_sb_cms_ingress,
@@ -785,6 +874,7 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
.pool_dess = mlxsw_sp2_sb_pool_dess,
.pms = mlxsw_sp2_sb_pms,
+ .pms_cpu = mlxsw_sp_cpu_port_sb_pms,
.prs = mlxsw_sp2_sb_prs,
.mms = mlxsw_sp_sb_mms,
.cms_ingress = mlxsw_sp2_sb_cms_ingress,
@@ -799,8 +889,8 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
u32 max_headroom_size;
- u16 ing_pool_count;
- u16 eg_pool_count;
+ u16 ing_pool_count = 0;
+ u16 eg_pool_count = 0;
int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
@@ -834,6 +924,9 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
goto err_sb_cpu_port_sb_cms_init;
+ err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
+ if (err)
+ goto err_sb_cpu_port_pms_init;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
if (err)
goto err_sb_mms_init;
@@ -851,6 +944,7 @@ int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
err_devlink_sb_register:
err_sb_mms_init:
+err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
mlxsw_sp_sb_ports_fini(mlxsw_sp);
@@ -900,16 +994,32 @@ int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type)
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
+ const struct mlxsw_sp_sb_pr *pr;
enum mlxsw_reg_sbpr_mode mode;
- if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
+ mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
+ pr = &mlxsw_sp->sb_vals->prs[pool_index];
+
+ if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
return -EINVAL;
+ }
+
+ if (pr->freeze_mode && pr->mode != mode) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
+ return -EINVAL;
+ };
+
+ if (pr->freeze_size && pr->size != size) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
+ return -EINVAL;
+ };
- mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
pool_size, false);
}
@@ -927,7 +1037,8 @@ static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
}
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
- u32 threshold, u32 *p_max_buff)
+ u32 threshold, u32 *p_max_buff,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
@@ -936,8 +1047,10 @@ static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
- val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+ val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
return -EINVAL;
+ }
*p_max_buff = val;
} else {
*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
@@ -963,7 +1076,7 @@ int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold)
+ u32 threshold, struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
@@ -973,7 +1086,7 @@ int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
int err;
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
- threshold, &max_buff);
+ threshold, &max_buff, extack);
if (err)
return err;
@@ -1004,22 +1117,41 @@ int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold)
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port =
mlxsw_core_port_driver_priv(mlxsw_core_port);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u8 local_port = mlxsw_sp_port->local_port;
+ const struct mlxsw_sp_sb_cm *cm;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
u32 max_buff;
int err;
- if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir)
+ if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
+ NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
return -EINVAL;
+ }
+
+ if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
+ else
+ cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];
+
+ if (cm->freeze_pool && cm->pool_index != pool_index) {
+ NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
+ return -EINVAL;
+ }
+
+ if (cm->freeze_thresh && cm->max_buff != threshold) {
+ NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
+ return -EINVAL;
+ }
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
- threshold, &max_buff);
+ threshold, &max_buff, extack);
if (err)
return err;
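The spectrum_buffers.c changes above do two related things: the extended pool descriptors carve out dedicated ingress/egress CPU pools for control traffic, and the new freeze_mode/freeze_size/freeze_pool/freeze_thresh flags let the driver veto user configuration that would disturb them, reporting the reason through the extack argument the devlink shared-buffer ops have just gained. The veto pattern itself is small; a generic sketch with hypothetical foo_* names and limit follows.

#include <linux/errno.h>
#include <linux/netlink.h>

#define FOO_MAX_POOL_SIZE (13 * 1024 * 1024)    /* hypothetical limit */

/* Hypothetical pool descriptor mirroring the freeze bits added above. */
struct foo_sb_pool {
        u32 size;
        u8 freeze_size:1;
};

static int foo_sb_pool_set(struct foo_sb_pool *pool, u32 size,
                           struct netlink_ext_ack *extack)
{
        if (size > FOO_MAX_POOL_SIZE) {
                NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
                return -EINVAL;
        }

        if (pool->freeze_size && pool->size != size) {
                NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
                return -EINVAL;
        }

        pool->size = size;
        return 0;
}

On failure the message travels back with the netlink error and is shown by the devlink tool next to the EINVAL.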
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
index e689576231ab..246dbb3c0e1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h
@@ -4,24 +4,9 @@
#ifndef _MLXSW_PIPELINE_H_
#define _MLXSW_PIPELINE_H_
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
-
int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp);
-#else
-
-static inline int mlxsw_sp_dpipe_init(struct mlxsw_sp *mlxsw_sp)
-{
- return 0;
-}
-
-static inline void mlxsw_sp_dpipe_fini(struct mlxsw_sp *mlxsw_sp)
-{
-}
-
-#endif
-
#define MLXSW_SP_DPIPE_TABLE_NAME_ERIF "mlxsw_erif"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST4 "mlxsw_host4"
#define MLXSW_SP_DPIPE_TABLE_NAME_HOST6 "mlxsw_host6"
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 902e766a8ed3..1cda8a248b12 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -13,9 +13,9 @@
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
-#include <linux/random.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
+#include <linux/jhash.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
@@ -2371,7 +2371,7 @@ static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}
-static void
+static int
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
enum mlxsw_reg_rauht_op op)
@@ -2385,10 +2385,10 @@ mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
if (neigh_entry->counter_valid)
mlxsw_reg_rauht_pack_counter(rauht_pl,
neigh_entry->counter_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
-static void
+static int
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
enum mlxsw_reg_rauht_op op)
@@ -2402,7 +2402,7 @@ mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
if (neigh_entry->counter_valid)
mlxsw_reg_rauht_pack_counter(rauht_pl,
neigh_entry->counter_index);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}
bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
@@ -2424,20 +2424,33 @@ mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_neigh_entry *neigh_entry,
bool adding)
{
+ enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
+ int err;
+
if (!adding && !neigh_entry->connected)
return;
neigh_entry->connected = adding;
if (neigh_entry->key.n->tbl->family == AF_INET) {
- mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
- mlxsw_sp_rauht_op(adding));
+ err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
+ op);
+ if (err)
+ return;
} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
return;
- mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
- mlxsw_sp_rauht_op(adding));
+ err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
+ op);
+ if (err)
+ return;
} else {
WARN_ON_ONCE(1);
+ return;
}
+
+ if (adding)
+ neigh_entry->key.n->flags |= NTF_OFFLOADED;
+ else
+ neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
}
void
@@ -2873,12 +2886,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct in6_addr *gw;
int ifindex, weight;
- ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
- weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
- gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
+ ifindex = fib6_nh->fib_nh_dev->ifindex;
+ weight = fib6_nh->fib_nh_weight;
+ gw = &fib6_nh->fib_nh_gw6;
if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
weight))
return false;
@@ -2944,7 +2958,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev;
val ^= dev->ifindex;
}
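
The field renames here and in the hunks that follow (nh_dev -> fib_nh_dev, nh_gw -> fib_nh_gw4/fib_nh_gw6, nh_weight -> fib_nh_weight, nh_flags -> fib_nh_flags) track the consolidation of shared nexthop state into struct fib_nh_common, with fib_nh_gw_family saying which gateway member is valid. A hedged sketch of family-agnostic access through the common struct, assuming the nhc_* layout this series introduces:

#include <linux/netdevice.h>
#include <linux/socket.h>
#include <net/ip_fib.h>

/* Print device and gateway of one nexthop, whatever the gateway family. */
static void dump_nhc(const struct fib_nh_common *nhc)
{
        const char *name = nhc->nhc_dev ? nhc->nhc_dev->name : "none";

        switch (nhc->nhc_gw_family) {
        case AF_INET:
                pr_debug("%s: gw %pI4\n", name, &nhc->nhc_gw.ipv4);
                break;
        case AF_INET6:
                pr_debug("%s: gw %pI6c\n", name, &nhc->nhc_gw.ipv6);
                break;
        default:
                pr_debug("%s: no gateway\n", name);
        }
}
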
@@ -3610,7 +3624,7 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib_nh *fib_nh,
enum mlxsw_sp_ipip_type *p_ipipt)
{
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
return dev &&
fib_nh->nh_parent->fib_type == RTN_UNICAST &&
@@ -3637,7 +3651,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
struct fib_nh *fib_nh)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
@@ -3681,18 +3695,18 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct fib_nh *fib_nh)
{
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
struct in_device *in_dev;
int err;
nh->nh_grp = nh_grp;
nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- nh->nh_weight = fib_nh->nh_weight;
+ nh->nh_weight = fib_nh->fib_nh_weight;
#else
nh->nh_weight = 1;
#endif
- memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
+ memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
@@ -3705,7 +3719,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->nh_flags & RTNH_F_LINKDOWN)
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
return 0;
err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
@@ -3804,7 +3818,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib_info *fi)
{
- return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
+ return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
}
@@ -3946,9 +3960,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->fib6_nh.nh_gw))
+ &rt->fib6_nh.fib_nh_gw6))
return nh;
continue;
}
@@ -3966,7 +3980,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
- nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
@@ -3974,9 +3988,9 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
if (nh->offloaded)
- nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -3992,7 +4006,7 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
- nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4008,19 +4022,20 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct mlxsw_sp_nexthop *nh;
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
if (nh && nh->offloaded)
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4035,7 +4050,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4913,7 +4928,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+ return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_gw_family;
}
static struct fib6_info *
@@ -4972,8 +4987,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->fib6_nh.nh_dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
+ return rt->fib6_nh.fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
@@ -4983,7 +4998,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
struct mlxsw_sp_rif *rif;
int err;
@@ -5026,11 +5041,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->fib6_nh.nh_weight;
- memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh.fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5053,7 +5068,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
- return rt->fib6_flags & RTF_GATEWAY ||
+ return rt->fib6_nh.fib_nh_gw_family ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
@@ -6035,6 +6050,10 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
fr_info = container_of(info, struct fib_rule_notifier_info, info);
rule = fr_info->rule;
+ /* Rule only affects locally generated traffic */
+ if (rule->iifindex == info->net->loopback_dev->ifindex)
+ return 0;
+
switch (info->family) {
case AF_INET:
if (!fib4_rule_default(rule) && !rule->l3mdev)
@@ -6086,10 +6105,20 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
return notifier_from_errno(err);
break;
case FIB_EVENT_ENTRY_ADD:
+ case FIB_EVENT_ENTRY_REPLACE: /* fall through */
+ case FIB_EVENT_ENTRY_APPEND: /* fall through */
if (router->aborted) {
NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
return notifier_from_errno(-EINVAL);
}
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ }
break;
}
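
The check added above vetoes IPv4 routes whose fib_info carries an IPv6 gateway before any offload work is queued, and reports the reason through the notifier's extack so it reaches the user. The general shape of such a veto in a FIB notifier is sketched below; my_fib_event() and the surrounding names are illustrative, not the driver's real code.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/netlink.h>
#include <linux/socket.h>
#include <net/fib_notifier.h>
#include <net/ip_fib.h>

static int my_fib_event(struct notifier_block *nb, unsigned long event,
                        void *ptr)
{
        struct fib_notifier_info *info = ptr;
        struct fib_entry_notifier_info *fen_info;

        if (event != FIB_EVENT_ENTRY_ADD || info->family != AF_INET)
                return NOTIFY_DONE;

        fen_info = container_of(info, struct fib_entry_notifier_info, info);
        /* Same condition as in the hunk above: v6 gateway on a v4 route. */
        if (fen_info->fi->fib_nh_is_v6) {
                NL_SET_ERR_MSG_MOD(info->extack,
                                   "IPv6 gateway with IPv4 route is not supported");
                return notifier_from_errno(-EINVAL);
        }
        return NOTIFY_DONE;
}
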
@@ -7808,7 +7837,7 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
char recr2_pl[MLXSW_REG_RECR2_LEN];
u32 seed;
- get_random_bytes(&seed, sizeof(seed));
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
mlxsw_reg_recr2_pack(recr2_pl, seed);
mlxsw_sp_mp4_hash_init(recr2_pl);
mlxsw_sp_mp6_hash_init(recr2_pl);
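
Replacing get_random_bytes() with a jhash of the switch base MAC makes the ECMP/LAG hash seed stable across driver reloads of the same box, which helps when reproducing flow-placement issues, while still differing between switches. The derivation is simply:

#include <linux/jhash.h>
#include <linux/if_ether.h>

/* Stable per-device seed derived from the base MAC address. */
static u32 ecmp_seed_from_mac(const unsigned char mac[ETH_ALEN])
{
        return jhash(mac, ETH_ALEN, 0);
}

The trade-off is that the seed is predictable to anyone who knows the base MAC, which is presumably acceptable for a switch-local load-balancing hash.
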
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 536c23c578c3..560a60e522f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -316,7 +316,11 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
dev = rt->dst.dev;
*saddrp = fl4.saddr;
- *daddrp = rt->rt_gateway;
+ if (rt->rt_gw_family == AF_INET)
+ *daddrp = rt->rt_gw4;
+ /* can not offload if route has an IPv6 gateway */
+ else if (rt->rt_gw_family == AF_INET6)
+ dev = NULL;
out:
ip_rt_put(rt);
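
struct rtable no longer exposes a single rt_gateway: the gateway lives in rt_gw4 or rt_gw6 depending on rt_gw_family, so a caller that can only program an IPv4 next hop has to check the family first, as the hunk above does. An illustrative helper following the same logic (the function name is made up for the sketch, assuming the 5.2 rtable layout):

#include <linux/socket.h>
#include <net/route.h>

/* IPv4 next hop for rt, or 0 when it cannot be expressed as IPv4. */
static __be32 rt_nexthop_v4_only(const struct rtable *rt, __be32 daddr)
{
        if (rt->rt_gw_family == AF_INET)
                return rt->rt_gw4;
        if (rt->rt_gw_family == AF_INET6)
                return 0;       /* IPv6 gateway: not representable */
        return daddr;           /* on-link: next hop is the destination */
}
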
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index bcf2e79a21c8..0d9356b3f65d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -30,6 +30,7 @@ struct mlxsw_sib {
struct mlxsw_sib_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[ETH_ALEN];
};
struct mlxsw_sib_port {
@@ -102,6 +103,18 @@ mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+static int mlxsw_sib_hw_id_get(struct mlxsw_sib *mlxsw_sib)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sib->hw_id);
+ return 0;
+}
+
static int
mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port,
bool is_up)
@@ -267,7 +280,9 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
{
int err;
- err = mlxsw_core_port_init(mlxsw_sib->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sib->core, local_port,
+ module + 1, false, 0,
+ mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id));
if (err) {
dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -439,6 +454,12 @@ static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
mlxsw_sib->core = mlxsw_core;
mlxsw_sib->bus_info = mlxsw_bus_info;
+ err = mlxsw_sib_hw_id_get(mlxsw_sib);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
err = mlxsw_sib_ports_create(mlxsw_sib);
if (err) {
dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 533fe6235b7c..fc4f19167262 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -379,26 +379,14 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
stats->tx_dropped = tx_dropped;
}
-static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
- size_t len)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
-
- return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
- mlxsw_sx_port->local_port,
- name, len);
-}
-
-static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_sx_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- ppid->id_len = sizeof(mlxsw_sx->hw_id);
- memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
+ mlxsw_sx_port->local_port);
}
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
@@ -407,8 +395,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_start_xmit = mlxsw_sx_port_xmit,
.ndo_change_mtu = mlxsw_sx_port_change_mtu,
.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
- .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
- .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port,
};
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
@@ -1102,7 +1089,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port, dev, module + 1, false, 0);
+ mlxsw_sx_port, dev);
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
@@ -1127,7 +1114,9 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
{
int err;
- err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
+ module + 1, false, 0,
+ mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
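
The switchx2.c hunks above drop the hand-rolled .ndo_get_phys_port_name and .ndo_get_port_parent_id callbacks: once a driver can hand back its devlink_port, the core derives the port name and parent ID from it. A minimal sketch of the new callback for a driver that registers devlink ports; struct my_port and the other names are placeholders, not the mlxsw code.

#include <linux/netdevice.h>
#include <net/devlink.h>

struct my_port {
        struct devlink_port dl_port;    /* registered via devlink_port_register() */
        /* ... driver state ... */
};

static struct devlink_port *my_get_devlink_port(struct net_device *dev)
{
        struct my_port *port = netdev_priv(dev);

        return &port->dl_port;
}

static const struct net_device_ops my_netdev_ops = {
        .ndo_get_devlink_port = my_get_devlink_port,
        /* phys_port_name and port_parent_id now come from the devlink port */
};
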
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 7849119d407a..b44172a901ed 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -425,7 +425,7 @@ static void ks8851_init_mac(struct ks8851_net *ks)
const u8 *mac_addr;
mac_addr = of_get_mac_address(ks->spidev->dev.of_node);
- if (mac_addr) {
+ if (!IS_ERR(mac_addr)) {
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
ks8851_write_mac_addr(dev);
return;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index c946841c0a06..dc76b0d15234 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -1327,7 +1327,7 @@ static int ks8851_probe(struct platform_device *pdev)
/* overwriting the default MAC address */
if (pdev->dev.of_node) {
mac = of_get_mac_address(pdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
memcpy(ks->mac_addr, mac, ETH_ALEN);
} else {
struct ks8851_mll_platform_data *pdata;
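
The two micrel hunks above adapt to of_get_mac_address() now returning an ERR_PTR on failure (for instance -EPROBE_DEFER while an nvmem cell is not ready yet) instead of NULL, hence the IS_ERR() checks. A typical caller, sketched with an illustrative helper name:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void my_init_mac(struct net_device *ndev, struct device_node *np)
{
        const void *mac = of_get_mac_address(np);

        if (!IS_ERR(mac))
                ether_addr_copy(ndev->dev_addr, mac);   /* from DT / nvmem */
        else
                eth_hw_addr_random(ndev);               /* no usable address */
}

Drivers that care about deferral can instead check for PTR_ERR(mac) == -EPROBE_DEFER and return it from probe rather than falling back to a random address.
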
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 8f72587b5a2c..0567e4f387a5 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Microchip ENC28J60 ethernet driver (MAC + PHY)
*
@@ -5,11 +6,6 @@
* Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
* based on enc28j60.c written by David Anders for 2.4 kernel version
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
*/
@@ -18,9 +14,9 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -28,7 +24,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/of_net.h>
#include "enc28j60_hw.h"
@@ -41,10 +36,11 @@
(NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)
/* Buffer size required for the largest SPI transfer (i.e., reading a
- * frame). */
+ * frame).
+ */
#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN)
-#define TX_TIMEOUT (4 * HZ)
+#define TX_TIMEOUT (4 * HZ)
/* Max TX retries in case of collision as suggested by errata datasheet */
#define MAX_TX_RETRYCOUNT 16
@@ -83,11 +79,12 @@ static struct {
/*
* SPI read buffer
- * wait for the SPI transfer and copy received data to destination
+ * Wait for the SPI transfer and copy received data to destination.
*/
static int
spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
+ struct device *dev = &priv->spi->dev;
u8 *rx_buf = priv->spi_transfer_buf + 4;
u8 *tx_buf = priv->spi_transfer_buf;
struct spi_transfer tx = {
@@ -113,8 +110,8 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
ret = msg.status;
}
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
return ret;
}
@@ -122,9 +119,9 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
/*
* SPI write buffer
*/
-static int spi_write_buf(struct enc28j60_net *priv, int len,
- const u8 *data)
+static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data)
{
+ struct device *dev = &priv->spi->dev;
int ret;
if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
@@ -134,8 +131,8 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
memcpy(&priv->spi_transfer_buf[1], data, len);
ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
}
return ret;
}
@@ -143,9 +140,9 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
/*
* basic SPI read operation
*/
-static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
- u8 addr)
+static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr)
{
+ struct device *dev = &priv->spi->dev;
u8 tx_buf[2];
u8 rx_buf[4];
u8 val = 0;
@@ -159,8 +156,8 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
tx_buf[0] = op | (addr & ADDR_MASK);
ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
if (ret)
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
else
val = rx_buf[slen - 1];
@@ -170,28 +167,25 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
/*
* basic SPI write operation
*/
-static int spi_write_op(struct enc28j60_net *priv, u8 op,
- u8 addr, u8 val)
+static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val)
{
+ struct device *dev = &priv->spi->dev;
int ret;
priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
priv->spi_transfer_buf[1] = val;
ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
return ret;
}
static void enc28j60_soft_reset(struct enc28j60_net *priv)
{
- if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
/* Errata workaround #1, CLKRDY check is unreliable,
- * delay at least 1 mS instead */
+ * delay at least 1 ms instead */
udelay(2000);
}
@@ -203,7 +197,7 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
u8 b = (addr & BANK_MASK) >> 5;
/* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
- * are present in all banks, no need to switch bank
+ * are present in all banks, no need to switch bank.
*/
if (addr >= EIE && addr <= ECON1)
return;
@@ -242,15 +236,13 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
/*
* Register bit field Set
*/
-static void nolock_reg_bfset(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
enc28j60_set_bank(priv, addr);
spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
}
-static void locked_reg_bfset(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
mutex_lock(&priv->lock);
nolock_reg_bfset(priv, addr, mask);
@@ -260,15 +252,13 @@ static void locked_reg_bfset(struct enc28j60_net *priv,
/*
* Register bit field Clear
*/
-static void nolock_reg_bfclr(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
enc28j60_set_bank(priv, addr);
spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
}
-static void locked_reg_bfclr(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
mutex_lock(&priv->lock);
nolock_reg_bfclr(priv, addr, mask);
@@ -278,15 +268,13 @@ static void locked_reg_bfclr(struct enc28j60_net *priv,
/*
* Register byte read
*/
-static int nolock_regb_read(struct enc28j60_net *priv,
- u8 address)
+static int nolock_regb_read(struct enc28j60_net *priv, u8 address)
{
enc28j60_set_bank(priv, address);
return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
}
-static int locked_regb_read(struct enc28j60_net *priv,
- u8 address)
+static int locked_regb_read(struct enc28j60_net *priv, u8 address)
{
int ret;
@@ -300,8 +288,7 @@ static int locked_regb_read(struct enc28j60_net *priv,
/*
* Register word read
*/
-static int nolock_regw_read(struct enc28j60_net *priv,
- u8 address)
+static int nolock_regw_read(struct enc28j60_net *priv, u8 address)
{
int rl, rh;
@@ -312,8 +299,7 @@ static int nolock_regw_read(struct enc28j60_net *priv,
return (rh << 8) | rl;
}
-static int locked_regw_read(struct enc28j60_net *priv,
- u8 address)
+static int locked_regw_read(struct enc28j60_net *priv, u8 address)
{
int ret;
@@ -327,15 +313,13 @@ static int locked_regw_read(struct enc28j60_net *priv,
/*
* Register byte write
*/
-static void nolock_regb_write(struct enc28j60_net *priv,
- u8 address, u8 data)
+static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
enc28j60_set_bank(priv, address);
spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
}
-static void locked_regb_write(struct enc28j60_net *priv,
- u8 address, u8 data)
+static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
mutex_lock(&priv->lock);
nolock_regb_write(priv, address, data);
@@ -345,8 +329,7 @@ static void locked_regb_write(struct enc28j60_net *priv,
/*
* Register word write
*/
-static void nolock_regw_write(struct enc28j60_net *priv,
- u8 address, u16 data)
+static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
enc28j60_set_bank(priv, address);
spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
@@ -354,8 +337,7 @@ static void nolock_regw_write(struct enc28j60_net *priv,
(u8) (data >> 8));
}
-static void locked_regw_write(struct enc28j60_net *priv,
- u8 address, u16 data)
+static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
mutex_lock(&priv->lock);
nolock_regw_write(priv, address, data);
@@ -364,20 +346,23 @@ static void locked_regw_write(struct enc28j60_net *priv,
/*
* Buffer memory read
- * Select the starting address and execute a SPI buffer read
+ * Select the starting address and execute a SPI buffer read.
*/
-static void enc28j60_mem_read(struct enc28j60_net *priv,
- u16 addr, int len, u8 *data)
+static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len,
+ u8 *data)
{
mutex_lock(&priv->lock);
nolock_regw_write(priv, ERDPTL, addr);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
u16 reg;
+
reg = nolock_regw_read(priv, ERDPTL);
if (reg != addr)
- printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
- "(0x%04x - 0x%04x)\n", __func__, reg, addr);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() error writing ERDPT (0x%04x - 0x%04x)\n",
+ __func__, reg, addr);
}
#endif
spi_read_buf(priv, len, data);
@@ -390,6 +375,8 @@ static void enc28j60_mem_read(struct enc28j60_net *priv,
static void
enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
{
+ struct device *dev = &priv->spi->dev;
+
mutex_lock(&priv->lock);
/* Set the write pointer to start of transmit buffer area */
nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
@@ -398,9 +385,9 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
u16 reg;
reg = nolock_regw_read(priv, EWRPTL);
if (reg != TXSTART_INIT)
- printk(KERN_DEBUG DRV_NAME
- ": %s() ERWPT:0x%04x != 0x%04x\n",
- __func__, reg, TXSTART_INIT);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERWPT:0x%04x != 0x%04x\n",
+ __func__, reg, TXSTART_INIT);
}
#endif
/* Set the TXND pointer to correspond to the packet size given */
@@ -408,30 +395,28 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
/* write per-packet control byte */
spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() after control byte ERWPT:0x%04x\n",
- __func__, nolock_regw_read(priv, EWRPTL));
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after control byte ERWPT:0x%04x\n",
+ __func__, nolock_regw_read(priv, EWRPTL));
/* copy the packet into the transmit buffer */
spi_write_buf(priv, len, data);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() after write packet ERWPT:0x%04x, len=%d\n",
- __func__, nolock_regw_read(priv, EWRPTL), len);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after write packet ERWPT:0x%04x, len=%d\n",
+ __func__, nolock_regw_read(priv, EWRPTL), len);
mutex_unlock(&priv->lock);
}
-static unsigned long msec20_to_jiffies;
-
static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
{
- unsigned long timeout = jiffies + msec20_to_jiffies;
+ struct device *dev = &priv->spi->dev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(20);
/* 20 msec timeout read */
while ((nolock_regb_read(priv, reg) & mask) != val) {
if (time_after(jiffies, timeout)) {
if (netif_msg_drv(priv))
- dev_dbg(&priv->spi->dev,
- "reg %02x ready timeout!\n", reg);
+ dev_dbg(dev, "reg %02x ready timeout!\n", reg);
return -ETIMEDOUT;
}
cpu_relax();
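
The hunk above (together with the removal of the msec20_to_jiffies module global further down) computes the deadline with msecs_to_jiffies(20) at the call site; for a constant argument this is folded at compile time, so caching it at module init bought essentially nothing. The generic jiffies-based polling pattern it uses looks roughly like this sketch (names are illustrative):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/processor.h>

/* Poll until cond(ctx) holds or roughly timeout_ms have elapsed. */
static int poll_with_timeout(bool (*cond)(void *ctx), void *ctx,
                             unsigned int timeout_ms)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

        while (!cond(ctx)) {
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
                cpu_relax();    /* busy-wait, caller may hold a mutex */
        }
        return 0;
}
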
@@ -449,7 +434,7 @@ static int wait_phy_ready(struct enc28j60_net *priv)
/*
* PHY register read
- * PHY registers are not accessed directly, but through the MII
+ * PHY registers are not accessed directly, but through the MII.
*/
static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
{
@@ -465,7 +450,7 @@ static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
/* quit reading */
nolock_regb_write(priv, MICMD, 0x00);
/* return the data */
- ret = nolock_regw_read(priv, MIRDL);
+ ret = nolock_regw_read(priv, MIRDL);
mutex_unlock(&priv->lock);
return ret;
@@ -494,13 +479,13 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
{
int ret;
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
mutex_lock(&priv->lock);
if (!priv->hw_enable) {
if (netif_msg_drv(priv))
- printk(KERN_INFO DRV_NAME
- ": %s: Setting MAC address to %pM\n",
- ndev->name, ndev->dev_addr);
+ dev_info(dev, "%s: Setting MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
/* NOTE: MAC address in ENC28J60 is byte-backward */
nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
@@ -511,9 +496,9 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
ret = 0;
} else {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() Hardware must be disabled to set "
- "Mac address\n", __func__);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() Hardware must be disabled to set Mac address\n",
+ __func__);
ret = -EBUSY;
}
mutex_unlock(&priv->lock);
@@ -532,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ ether_addr_copy(dev->dev_addr, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
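
ether_addr_copy() above replaces a memcpy of dev->addr_len bytes; it always copies ETH_ALEN (6) bytes and its kerneldoc requires both pointers to be 16-bit aligned, which dev_addr and sockaddr::sa_data satisfy. A hedged sketch of the same ndo_set_mac_address shape:

#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>

static int set_station_addr(struct net_device *dev, const struct sockaddr *sa)
{
        if (!is_valid_ether_addr(sa->sa_data))
                return -EADDRNOTAVAIL;          /* reject multicast/zero */

        /* copies exactly ETH_ALEN bytes; both pointers are 2-byte aligned */
        ether_addr_copy(dev->dev_addr, sa->sa_data);
        return 0;
}
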
@@ -541,33 +526,36 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
*/
static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
{
+ struct device *dev = &priv->spi->dev;
+
mutex_lock(&priv->lock);
- printk(KERN_DEBUG DRV_NAME " %s\n"
- "HwRevID: 0x%02x\n"
- "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
- " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
- "MAC : MACON1 MACON3 MACON4\n"
- " 0x%02x 0x%02x 0x%02x\n"
- "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
- " 0x%04x 0x%04x 0x%04x 0x%04x "
- "0x%02x 0x%02x 0x%04x\n"
- "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
- " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
- msg, nolock_regb_read(priv, EREVID),
- nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
- nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
- nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
- nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
- nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
- nolock_regw_read(priv, ERXWRPTL),
- nolock_regw_read(priv, ERXRDPTL),
- nolock_regb_read(priv, ERXFCON),
- nolock_regb_read(priv, EPKTCNT),
- nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
- nolock_regw_read(priv, ETXNDL),
- nolock_regb_read(priv, MACLCON1),
- nolock_regb_read(priv, MACLCON2),
- nolock_regb_read(priv, MAPHSUP));
+ dev_printk(KERN_DEBUG, dev,
+ " %s\n"
+ "HwRevID: 0x%02x\n"
+ "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
+ "MAC : MACON1 MACON3 MACON4\n"
+ " 0x%02x 0x%02x 0x%02x\n"
+ "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
+ " 0x%04x 0x%04x 0x%04x 0x%04x "
+ "0x%02x 0x%02x 0x%04x\n"
+ "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
+ " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
+ msg, nolock_regb_read(priv, EREVID),
+ nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
+ nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
+ nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
+ nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
+ nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
+ nolock_regw_read(priv, ERXWRPTL),
+ nolock_regw_read(priv, ERXRDPTL),
+ nolock_regb_read(priv, ERXFCON),
+ nolock_regb_read(priv, EPKTCNT),
+ nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
+ nolock_regw_read(priv, ETXNDL),
+ nolock_regb_read(priv, MACLCON1),
+ nolock_regb_read(priv, MACLCON2),
+ nolock_regb_read(priv, MAPHSUP));
mutex_unlock(&priv->lock);
}
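
Most hunks in this file swap raw printk(KERN_DEBUG DRV_NAME ": ...") calls for the device-aware helpers: dev_printk(KERN_DEBUG, ...) keeps the message unconditionally built in (unlike dev_dbg(), which compiles out without DEBUG or dynamic debug), and the netdev_* variants also prefix the interface name. In short:

#include <linux/device.h>
#include <linux/netdevice.h>

static void log_examples(struct device *dev, struct net_device *ndev)
{
        /* "enc28j60 spi0.0: reset done" style prefix, always built in */
        dev_printk(KERN_DEBUG, dev, "reset done\n");

        /* adds driver, bus id and interface name, e.g. "... eth0: link up" */
        netdev_info(ndev, "link up\n");
        netdev_err(ndev, "TX error\n");
}
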
@@ -599,12 +587,13 @@ static u16 rx_packet_start(u16 ptr)
static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
+ struct device *dev = &priv->spi->dev;
u16 erxrdpt;
if (start > 0x1FFF || end > 0x1FFF || start > end) {
if (netif_msg_drv(priv))
- printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
- "bad parameters!\n", __func__, start, end);
+ dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n",
+ __func__, start, end);
return;
}
/* set receive buffer start + end */
@@ -617,10 +606,12 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
+ struct device *dev = &priv->spi->dev;
+
if (start > 0x1FFF || end > 0x1FFF || start > end) {
if (netif_msg_drv(priv))
- printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
- "bad parameters!\n", __func__, start, end);
+ dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n",
+ __func__, start, end);
return;
}
/* set transmit buffer start + end */
@@ -630,14 +621,15 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
/*
* Low power mode shrinks power consumption about 100x, so we'd like
- * the chip to be in that mode whenever it's inactive. (However, we
- * can't stay in lowpower mode during suspend with WOL active.)
+ * the chip to be in that mode whenever it's inactive. (However, we
+ * can't stay in low power mode during suspend with WOL active.)
*/
static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
{
+ struct device *dev = &priv->spi->dev;
+
if (netif_msg_drv(priv))
- dev_dbg(&priv->spi->dev, "%s power...\n",
- is_low ? "low" : "high");
+ dev_dbg(dev, "%s power...\n", is_low ? "low" : "high");
mutex_lock(&priv->lock);
if (is_low) {
@@ -656,11 +648,12 @@ static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
static int enc28j60_hw_init(struct enc28j60_net *priv)
{
+ struct device *dev = &priv->spi->dev;
u8 reg;
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
- priv->full_duplex ? "FullDuplex" : "HalfDuplex");
+ dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__,
+ priv->full_duplex ? "FullDuplex" : "HalfDuplex");
mutex_lock(&priv->lock);
/* first reset the chip */
@@ -682,15 +675,15 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
/*
* Check the RevID.
* If it's 0x00 or 0xFF probably the enc28j60 is not mounted or
- * damaged
+ * damaged.
*/
reg = locked_regb_read(priv, EREVID);
if (netif_msg_drv(priv))
- printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg);
+ dev_info(dev, "chip RevID: 0x%02x\n", reg);
if (reg == 0x00 || reg == 0xff) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
- __func__, reg);
+ dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n",
+ __func__, reg);
return 0;
}
@@ -723,7 +716,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
/*
* MACLCON1 (default)
* MACLCON2 (default)
- * Set the maximum packet size which the controller will accept
+ * Set the maximum packet size which the controller will accept.
*/
locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);
@@ -750,10 +743,12 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
static void enc28j60_hw_enable(struct enc28j60_net *priv)
{
+ struct device *dev = &priv->spi->dev;
+
/* enable interrupts */
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
- __func__);
+ dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n",
+ __func__);
enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
@@ -772,7 +767,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv)
static void enc28j60_hw_disable(struct enc28j60_net *priv)
{
mutex_lock(&priv->lock);
- /* disable interrutps and packet reception */
+ /* disable interrupts and packet reception */
nolock_regb_write(priv, EIE, 0x00);
nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
priv->hw_enable = false;
@@ -793,14 +788,12 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
priv->full_duplex = (duplex == DUPLEX_FULL);
else {
if (netif_msg_link(priv))
- dev_warn(&ndev->dev,
- "unsupported link setting\n");
+ netdev_warn(ndev, "unsupported link setting\n");
ret = -EOPNOTSUPP;
}
} else {
if (netif_msg_link(priv))
- dev_warn(&ndev->dev, "Warning: hw must be disabled "
- "to set link mode\n");
+ netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n");
ret = -EBUSY;
}
return ret;
@@ -811,21 +804,23 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
*/
static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
{
+ struct device *dev = &priv->spi->dev;
int endptr;
endptr = locked_regw_read(priv, ETXNDL);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
- endptr + 1);
+ dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n",
+ endptr + 1);
enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}
static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
- u8 tsv[TSV_SIZE])
+ u8 tsv[TSV_SIZE])
{
+ struct device *dev = &priv->spi->dev;
u16 tmp1, tmp2;
- printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg);
+ dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg);
tmp1 = tsv[1];
tmp1 <<= 8;
tmp1 |= tsv[0];
@@ -834,30 +829,32 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
tmp2 <<= 8;
tmp2 |= tsv[4];
- printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d,"
- " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2);
- printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d,"
- " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE),
- TSV_GETBIT(tsv, TSV_TXCRCERROR),
- TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
- TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
- printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
- "PacketDefer: %d, ExDefer: %d\n",
- TSV_GETBIT(tsv, TSV_TXMULTICAST),
- TSV_GETBIT(tsv, TSV_TXBROADCAST),
- TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
- TSV_GETBIT(tsv, TSV_TXEXDEFER));
- printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, "
- "Giant: %d, Underrun: %d\n",
- TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
- TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
- TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
- printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, "
- "BackPressApp: %d, VLanTagFrame: %d\n",
- TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
- TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
- TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
- TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
+ dev_printk(KERN_DEBUG, dev,
+ "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n",
+ tmp1, tsv[2] & 0x0f, tmp2);
+ dev_printk(KERN_DEBUG, dev,
+ "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ TSV_GETBIT(tsv, TSV_TXDONE),
+ TSV_GETBIT(tsv, TSV_TXCRCERROR),
+ TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
+ TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n",
+ TSV_GETBIT(tsv, TSV_TXMULTICAST),
+ TSV_GETBIT(tsv, TSV_TXBROADCAST),
+ TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
+ TSV_GETBIT(tsv, TSV_TXEXDEFER));
+ dev_printk(KERN_DEBUG, dev,
+ "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n",
+ TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
+ TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
+ TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n",
+ TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
+ TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
+ TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
+ TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
}
/*
@@ -866,27 +863,29 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
u16 pk_ptr, int len, u16 sts)
{
- printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n",
- msg, pk_ptr);
- printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len,
- RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
- printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d,"
- " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK),
- RSV_GETBIT(sts, RSV_CRCERROR),
- RSV_GETBIT(sts, RSV_LENCHECKERR),
- RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
- printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
- "LongDropEvent: %d, CarrierEvent: %d\n",
- RSV_GETBIT(sts, RSV_RXMULTICAST),
- RSV_GETBIT(sts, RSV_RXBROADCAST),
- RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
- RSV_GETBIT(sts, RSV_CARRIEREV));
- printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d,"
- " UnknownOp: %d, VLanTagFrame: %d\n",
- RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
- RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
- RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
- RSV_GETBIT(sts, RSV_RXTYPEVLAN));
+ struct device *dev = &priv->spi->dev;
+
+ dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr);
+ dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n",
+ len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
+ dev_printk(KERN_DEBUG, dev,
+ "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(sts, RSV_RXOK),
+ RSV_GETBIT(sts, RSV_CRCERROR),
+ RSV_GETBIT(sts, RSV_LENCHECKERR),
+ RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(sts, RSV_RXMULTICAST),
+ RSV_GETBIT(sts, RSV_RXBROADCAST),
+ RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(sts, RSV_CARRIEREV));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(sts, RSV_RXTYPEVLAN));
}
static void dump_packet(const char *msg, int len, const char *data)
@@ -904,20 +903,20 @@ static void dump_packet(const char *msg, int len, const char *data)
static void enc28j60_hw_rx(struct net_device *ndev)
{
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
struct sk_buff *skb = NULL;
u16 erxrdpt, next_packet, rxstat;
u8 rsv[RSV_SIZE];
int len;
if (netif_msg_rx_status(priv))
- printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n",
- priv->next_pk_ptr);
+ netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n",
+ priv->next_pk_ptr);
if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev,
- "%s() Invalid packet address!! 0x%04x\n",
- __func__, priv->next_pk_ptr);
+ netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n",
+ __func__, priv->next_pk_ptr);
/* packet address corrupted: reset RX logic */
mutex_lock(&priv->lock);
nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -950,7 +949,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat);
+ netdev_err(ndev, "Rx Error (%04x)\n", rxstat);
ndev->stats.rx_errors++;
if (RSV_GETBIT(rxstat, RSV_CRCERROR))
ndev->stats.rx_crc_errors++;
@@ -962,8 +961,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
if (!skb) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev,
- "out of memory for Rx'd frame\n");
+ netdev_err(ndev, "out of memory for Rx'd frame\n");
ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
@@ -983,12 +981,12 @@ static void enc28j60_hw_rx(struct net_device *ndev)
/*
* Move the RX read pointer to the start of the next
* received packet.
- * This frees the memory we just read out
+ * This frees the memory we just read out.
*/
erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
- __func__, erxrdpt);
+ dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n",
+ __func__, erxrdpt);
mutex_lock(&priv->lock);
nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -997,9 +995,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
u16 reg;
reg = nolock_regw_read(priv, ERXRDPTL);
if (reg != erxrdpt)
- printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
- "error (0x%04x - 0x%04x)\n", __func__,
- reg, erxrdpt);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n",
+ __func__, reg, erxrdpt);
}
#endif
priv->next_pk_ptr = next_packet;
@@ -1013,6 +1011,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
*/
static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
{
+ struct net_device *ndev = priv->netdev;
int epkcnt, erxst, erxnd, erxwr, erxrd;
int free_space;
@@ -1035,8 +1034,8 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
}
mutex_unlock(&priv->lock);
if (netif_msg_rx_status(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
- __func__, free_space);
+ netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n",
+ __func__, free_space);
return free_space;
}
@@ -1046,24 +1045,25 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
static void enc28j60_check_link_status(struct net_device *ndev)
{
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
u16 reg;
int duplex;
reg = enc28j60_phy_read(priv, PHSTAT2);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
- "PHSTAT2: %04x\n", __func__,
- enc28j60_phy_read(priv, PHSTAT1), reg);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__,
+ enc28j60_phy_read(priv, PHSTAT1), reg);
duplex = reg & PHSTAT2_DPXSTAT;
if (reg & PHSTAT2_LSTAT) {
netif_carrier_on(ndev);
if (netif_msg_ifup(priv))
- dev_info(&ndev->dev, "link up - %s\n",
- duplex ? "Full duplex" : "Half duplex");
+ netdev_info(ndev, "link up - %s\n",
+ duplex ? "Full duplex" : "Half duplex");
} else {
if (netif_msg_ifdown(priv))
- dev_info(&ndev->dev, "link down\n");
+ netdev_info(ndev, "link down\n");
netif_carrier_off(ndev);
}
}
@@ -1089,8 +1089,8 @@ static void enc28j60_tx_clear(struct net_device *ndev, bool err)
/*
* RX handler
- * ignore PKTIF because is unreliable! (look at the errata datasheet)
- * check EPKTCNT is the suggested workaround.
+ * Ignore PKTIF because it is unreliable! (Look at the errata datasheet)
+ * Checking EPKTCNT is the suggested workaround.
* We don't need to clear interrupt flag, automatically done when
* enc28j60_hw_rx() decrements the packet counter.
* Returns how many packet processed.
@@ -1102,13 +1102,14 @@ static int enc28j60_rx_interrupt(struct net_device *ndev)
pk_counter = locked_regb_read(priv, EPKTCNT);
if (pk_counter && netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter);
+ netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n",
+ pk_counter);
if (pk_counter > priv->max_pk_counter) {
/* update statistics */
priv->max_pk_counter = pk_counter;
if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
- printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n",
- priv->max_pk_counter);
+ netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n",
+ priv->max_pk_counter);
}
ret = pk_counter;
while (pk_counter-- > 0)
@@ -1124,8 +1125,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
struct net_device *ndev = priv->netdev;
int intflags, loop;
- if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
/* disable further interrupts */
locked_reg_bfclr(priv, EIE, EIE_INTIE);
@@ -1136,16 +1135,16 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
if ((intflags & EIR_DMAIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intDMA(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n",
+ loop);
locked_reg_bfclr(priv, EIR, EIR_DMAIF);
}
/* LINK changed handler */
if ((intflags & EIR_LINKIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intLINK(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n",
+ loop);
enc28j60_check_link_status(ndev);
/* read PHIR to clear the flag */
enc28j60_phy_read(priv, PHIR);
@@ -1156,13 +1155,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
bool err = false;
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intTX(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n",
+ loop);
priv->tx_retry_count = 0;
if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) {
if (netif_msg_tx_err(priv))
- dev_err(&ndev->dev,
- "Tx Error (aborted)\n");
+ netdev_err(ndev, "Tx Error (aborted)\n");
err = true;
}
if (netif_msg_tx_done(priv)) {
@@ -1179,8 +1177,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intTXErr(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n",
+ loop);
locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
enc28j60_read_tsv(priv, tsv);
if (netif_msg_tx_err(priv))
@@ -1194,9 +1192,9 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
/* Transmit Late collision check for retransmit */
if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
if (netif_msg_tx_err(priv))
- printk(KERN_DEBUG DRV_NAME
- ": LateCollision TXErr (%d)\n",
- priv->tx_retry_count);
+ netdev_printk(KERN_DEBUG, ndev,
+ "LateCollision TXErr (%d)\n",
+ priv->tx_retry_count);
if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
locked_reg_bfset(priv, ECON1,
ECON1_TXRTS);
@@ -1210,13 +1208,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
if ((intflags & EIR_RXERIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intRXErr(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n",
+ loop);
/* Check free FIFO space to flag RX overrun */
if (enc28j60_get_free_rxfifo(priv) <= 0) {
if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG DRV_NAME
- ": RX Overrun\n");
+ netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n");
ndev->stats.rx_dropped++;
}
locked_reg_bfclr(priv, EIR, EIR_RXERIF);
@@ -1228,8 +1225,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
/* re-enable interrupts */
locked_reg_bfset(priv, EIE, EIE_INTIE);
- if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
}
/*
@@ -1239,11 +1234,13 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ struct net_device *ndev = priv->netdev;
+
BUG_ON(!priv->tx_skb);
if (netif_msg_tx_queued(priv))
- printk(KERN_DEBUG DRV_NAME
- ": Tx Packet Len:%d\n", priv->tx_skb->len);
+ netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n",
+ priv->tx_skb->len);
if (netif_msg_pktdata(priv))
dump_packet(__func__,
@@ -1253,6 +1250,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
#ifdef CONFIG_ENC28J60_WRITEVERIFY
/* readback and verify written data */
if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
int test_len, k;
u8 test_buf[64]; /* limit the test to the first 64 bytes */
int okflag;
@@ -1266,16 +1264,14 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
okflag = 1;
for (k = 0; k < test_len; k++) {
if (priv->tx_skb->data[k] != test_buf[k]) {
- printk(KERN_DEBUG DRV_NAME
- ": Error, %d location differ: "
- "0x%02x-0x%02x\n", k,
- priv->tx_skb->data[k], test_buf[k]);
+ dev_printk(KERN_DEBUG, dev,
+ "Error, %d location differ: 0x%02x-0x%02x\n",
+ k, priv->tx_skb->data[k], test_buf[k]);
okflag = 0;
}
}
if (!okflag)
- printk(KERN_DEBUG DRV_NAME ": Tx write buffer, "
- "verify ERROR!\n");
+ dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n");
}
#endif
/* set TX request flag */
@@ -1287,14 +1283,11 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_tx_queued(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
/* If some error occurs while trying to transmit this
* packet, you should return '1' from this function.
* In such a case you _may not_ do anything to the
* SKB, it is still owned by the network queueing
- * layer when an error is returned. This means you
+ * layer when an error is returned. This means you
* may not modify any SKB fields, you may not free
* the SKB, etc.
*/
@@ -1337,7 +1330,7 @@ static void enc28j60_tx_timeout(struct net_device *ndev)
struct enc28j60_net *priv = netdev_priv(ndev);
if (netif_msg_timer(priv))
- dev_err(&ndev->dev, DRV_NAME " tx timeout\n");
+ netdev_err(ndev, "tx timeout\n");
ndev->stats.tx_errors++;
/* can't restart safely under softirq */
@@ -1356,13 +1349,9 @@ static int enc28j60_net_open(struct net_device *dev)
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
if (!is_valid_ether_addr(dev->dev_addr)) {
if (netif_msg_ifup(priv))
- dev_err(&dev->dev, "invalid MAC address %pM\n",
- dev->dev_addr);
+ netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr);
return -EADDRNOTAVAIL;
}
/* Reset the hardware here (and take it out of low power mode) */
@@ -1370,7 +1359,7 @@ static int enc28j60_net_open(struct net_device *dev)
enc28j60_hw_disable(priv);
if (!enc28j60_hw_init(priv)) {
if (netif_msg_ifup(priv))
- dev_err(&dev->dev, "hw_reset() failed\n");
+ netdev_err(dev, "hw_reset() failed\n");
return -EINVAL;
}
/* Update the MAC address (in case user has changed it) */
@@ -1392,9 +1381,6 @@ static int enc28j60_net_close(struct net_device *dev)
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
enc28j60_hw_disable(priv);
enc28j60_lowpower(priv, true);
netif_stop_queue(dev);
@@ -1415,16 +1401,16 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "promiscuous mode\n");
+ netdev_info(dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "%smulticast mode\n",
- (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ netdev_info(dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
priv->rxfilter = RXFILTER_MULTI;
} else {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "normal mode\n");
+ netdev_info(dev, "normal mode\n");
priv->rxfilter = RXFILTER_NORMAL;
}
@@ -1436,20 +1422,21 @@ static void enc28j60_setrx_work_handler(struct work_struct *work)
{
struct enc28j60_net *priv =
container_of(work, struct enc28j60_net, setrx_work);
+ struct device *dev = &priv->spi->dev;
if (priv->rxfilter == RXFILTER_PROMISC) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n");
+ dev_printk(KERN_DEBUG, dev, "promiscuous mode\n");
locked_regb_write(priv, ERXFCON, 0x00);
} else if (priv->rxfilter == RXFILTER_MULTI) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": multicast mode\n");
+ dev_printk(KERN_DEBUG, dev, "multicast mode\n");
locked_regb_write(priv, ERXFCON,
ERXFCON_UCEN | ERXFCON_CRCEN |
ERXFCON_BCEN | ERXFCON_MCEN);
} else {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": normal mode\n");
+ dev_printk(KERN_DEBUG, dev, "normal mode\n");
locked_regb_write(priv, ERXFCON,
ERXFCON_UCEN | ERXFCON_CRCEN |
ERXFCON_BCEN);
@@ -1468,7 +1455,7 @@ static void enc28j60_restart_work_handler(struct work_struct *work)
enc28j60_net_close(ndev);
ret = enc28j60_net_open(ndev);
if (unlikely(ret)) {
- dev_info(&ndev->dev, " could not restart %d\n", ret);
+ netdev_info(ndev, "could not restart %d\n", ret);
dev_close(ndev);
}
}
@@ -1552,14 +1539,13 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
+ unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
- const void *macaddr;
int ret = 0;
if (netif_msg_drv(&debug))
- dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n",
- DRV_VERSION);
+ dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION);
dev = alloc_etherdev(sizeof(struct enc28j60_net));
if (!dev) {
@@ -1570,8 +1556,7 @@ static int enc28j60_probe(struct spi_device *spi)
priv->netdev = dev; /* priv to netdev reference */
priv->spi = spi; /* priv to spi reference */
- priv->msg_enable = netif_msg_init(debug.msg_enable,
- ENC28J60_MSG_DEFAULT);
+ priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT);
mutex_init(&priv->lock);
INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
@@ -1582,13 +1567,12 @@ static int enc28j60_probe(struct spi_device *spi)
if (!enc28j60_chipset_init(dev)) {
if (netif_msg_probe(priv))
- dev_info(&spi->dev, DRV_NAME " chip not found\n");
+ dev_info(&spi->dev, "chip not found\n");
ret = -EIO;
goto error_irq;
}
- macaddr = of_get_mac_address(spi->dev.of_node);
- if (macaddr)
+ if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
ether_addr_copy(dev->dev_addr, macaddr);
else
eth_hw_addr_random(dev);
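
device_get_mac_address() (from linux/property.h) looks up the usual "mac-address"/"local-mac-address" properties through the unified fwnode API, so it works on both DT and ACPI systems; it fills the caller's buffer and returns it, or NULL when no valid address is found. A sketch of the probe-time use, with an illustrative helper name:

#include <linux/property.h>
#include <linux/etherdevice.h>

static void my_set_initial_mac(struct device *dev, struct net_device *ndev)
{
        unsigned char addr[ETH_ALEN];

        if (device_get_mac_address(dev, addr, sizeof(addr)))
                ether_addr_copy(ndev->dev_addr, addr);  /* from DT/ACPI */
        else
                eth_hw_addr_random(ndev);               /* no firmware-provided MAC */
}
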
@@ -1600,8 +1584,8 @@ static int enc28j60_probe(struct spi_device *spi)
ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
if (ret < 0) {
if (netif_msg_probe(priv))
- dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
- "(ret = %d)\n", spi->irq, ret);
+ dev_err(&spi->dev, "request irq %d failed (ret = %d)\n",
+ spi->irq, ret);
goto error_irq;
}
@@ -1616,11 +1600,10 @@ static int enc28j60_probe(struct spi_device *spi)
ret = register_netdev(dev);
if (ret) {
if (netif_msg_probe(priv))
- dev_err(&spi->dev, "register netdev " DRV_NAME
- " failed (ret = %d)\n", ret);
+ dev_err(&spi->dev, "register netdev failed (ret = %d)\n",
+ ret);
goto error_register;
}
- dev_info(&dev->dev, DRV_NAME " driver registered\n");
return 0;
@@ -1636,9 +1619,6 @@ static int enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = spi_get_drvdata(spi);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": remove\n");
-
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
free_netdev(priv->netdev);
@@ -1660,22 +1640,7 @@ static struct spi_driver enc28j60_driver = {
.probe = enc28j60_probe,
.remove = enc28j60_remove,
};
-
-static int __init enc28j60_init(void)
-{
- msec20_to_jiffies = msecs_to_jiffies(20);
-
- return spi_register_driver(&enc28j60_driver);
-}
-
-module_init(enc28j60_init);
-
-static void __exit enc28j60_exit(void)
-{
- spi_unregister_driver(&enc28j60_driver);
-}
-
-module_exit(enc28j60_exit);
+module_spi_driver(enc28j60_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 549898d5d450..f0d0e09f60e2 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -19,6 +19,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 47c708f08ade..87bf784f8e8f 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -15,6 +15,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ ccm.o \
nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
@@ -42,7 +43,8 @@ nfp-objs += \
flower/match.o \
flower/metadata.o \
flower/offload.o \
- flower/tunnel_conf.o
+ flower/tunnel_conf.o \
+ flower/qos_conf.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 9584f03f3efa..69e84ff7f2e5 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -261,10 +261,15 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm)
int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET;
struct nfp_net *nn = alink->vnic;
unsigned int i;
int err;
+ err = nfp_net_mbox_lock(nn, alink->abm->prio_map_len);
+ if (err)
+ return err;
+
/* Write data_len and wipe reserved */
nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN,
alink->abm->prio_map_len);
@@ -273,8 +278,7 @@ int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed)
nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i,
packed[i / sizeof(u32)]);
- err = nfp_net_reconfig_mbox(nn,
- NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET);
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
if (err)
nfp_err(alink->abm->app->cpp,
"setting DSCP -> VQ map failed with error %d\n", err);
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 4d4ff5844c47..9183b3e85d21 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -53,7 +53,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
}
}
-static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
+static struct net_device *
+nfp_abm_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type rtype;
struct nfp_reprs *reprs;
@@ -549,5 +550,5 @@ const struct nfp_app_type app_abm = {
.eswitch_mode_get = nfp_abm_eswitch_mode_get,
.eswitch_mode_set = nfp_abm_eswitch_mode_set,
- .repr_get = nfp_abm_repr_get,
+ .dev_get = nfp_abm_repr_get,
};
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 9b6cfa697879..bc9850e4ec5e 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -6,48 +6,13 @@
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
-#include <linux/wait.h>
+#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
-#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
-
-static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
-{
- u16 used_tags;
-
- used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
-
- return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
-}
-
-static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
-{
- /* All FW communication for BPF is request-reply. To make sure we
- * don't reuse the message ID too early after timeout - limit the
- * number of requests in flight.
- */
- if (nfp_bpf_all_tags_busy(bpf)) {
- cmsg_warn(bpf, "all FW request contexts busy!\n");
- return -EAGAIN;
- }
-
- WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
- return bpf->tag_alloc_next++;
-}
-
-static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
- WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
-
- while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
- bpf->tag_alloc_last != bpf->tag_alloc_next)
- bpf->tag_alloc_last++;
-}
-
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
@@ -87,149 +52,6 @@ nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
return size;
}
-static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
-{
- struct cmsg_hdr *hdr;
-
- hdr = (struct cmsg_hdr *)skb->data;
-
- return hdr->type;
-}
-
-static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
-{
- struct cmsg_hdr *hdr;
-
- hdr = (struct cmsg_hdr *)skb->data;
-
- return be16_to_cpu(hdr->tag);
-}
-
-static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
- unsigned int msg_tag;
- struct sk_buff *skb;
-
- skb_queue_walk(&bpf->cmsg_replies, skb) {
- msg_tag = nfp_bpf_cmsg_get_tag(skb);
- if (msg_tag == tag) {
- nfp_bpf_free_tag(bpf, tag);
- __skb_unlink(skb, &bpf->cmsg_replies);
- return skb;
- }
- }
-
- return NULL;
-}
-
-static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
-{
- struct sk_buff *skb;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- skb = __nfp_bpf_reply(bpf, tag);
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return skb;
-}
-
-static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
-{
- struct sk_buff *skb;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- skb = __nfp_bpf_reply(bpf, tag);
- if (!skb)
- nfp_bpf_free_tag(bpf, tag);
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
- int tag)
-{
- struct sk_buff *skb;
- int i, err;
-
- for (i = 0; i < 50; i++) {
- udelay(4);
- skb = nfp_bpf_reply(bpf, tag);
- if (skb)
- return skb;
- }
-
- err = wait_event_interruptible_timeout(bpf->cmsg_wq,
- skb = nfp_bpf_reply(bpf, tag),
- msecs_to_jiffies(5000));
- /* We didn't get a response - try last time and atomically drop
- * the tag even if no response is matched.
- */
- if (!skb)
- skb = nfp_bpf_reply_drop_tag(bpf, tag);
- if (err < 0) {
- cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
- err == ERESTARTSYS ? "interrupted" : "error",
- type, err);
- return ERR_PTR(err);
- }
- if (!skb) {
- cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
- type);
- return ERR_PTR(-ETIMEDOUT);
- }
-
- return skb;
-}
-
-static struct sk_buff *
-nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
- enum nfp_bpf_cmsg_type type, unsigned int reply_size)
-{
- struct cmsg_hdr *hdr;
- int tag;
-
- nfp_ctrl_lock(bpf->app->ctrl);
- tag = nfp_bpf_alloc_tag(bpf);
- if (tag < 0) {
- nfp_ctrl_unlock(bpf->app->ctrl);
- dev_kfree_skb_any(skb);
- return ERR_PTR(tag);
- }
-
- hdr = (void *)skb->data;
- hdr->ver = CMSG_MAP_ABI_VERSION;
- hdr->type = type;
- hdr->tag = cpu_to_be16(tag);
-
- __nfp_app_ctrl_tx(bpf->app, skb);
-
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
- if (IS_ERR(skb))
- return skb;
-
- hdr = (struct cmsg_hdr *)skb->data;
- if (hdr->type != __CMSG_REPLY(type)) {
- cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
- hdr->type, __CMSG_REPLY(type));
- goto err_free;
- }
- /* 0 reply_size means caller will do the validation */
- if (reply_size && skb->len != reply_size) {
- cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
- type, skb->len, reply_size);
- goto err_free;
- }
-
- return skb;
-err_free:
- dev_kfree_skb_any(skb);
- return ERR_PTR(-EIO);
-}
-
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply)
@@ -275,8 +97,8 @@ nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0;
- skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
- sizeof(*reply));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_ALLOC,
+ sizeof(*reply));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -310,8 +132,8 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
- skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
- sizeof(*reply));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, NFP_CCM_TYPE_BPF_MAP_FREE,
+ sizeof(*reply));
if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n");
return;
@@ -354,8 +176,7 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
}
static int
-nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
- enum nfp_bpf_cmsg_type op,
+nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
@@ -386,8 +207,8 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size);
- skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
- nfp_bpf_cmsg_map_reply_size(bpf, 1));
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -415,34 +236,34 @@ err_free:
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_UPDATE,
key, value, flags, NULL, NULL);
}
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_DELETE,
key, NULL, 0, NULL, NULL);
}
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_LOOKUP,
key, NULL, 0, NULL, value);
}
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL);
}
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
- return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
+ return nfp_bpf_ctrl_entry_op(offmap, NFP_CCM_TYPE_BPF_MAP_GETNEXT,
key, NULL, 0, next_key, NULL);
}
@@ -456,54 +277,35 @@ unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
- unsigned int tag;
if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
- goto err_free;
+ dev_kfree_skb_any(skb);
+ return;
}
- if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
+ if (nfp_ccm_get_type(skb) == NFP_CCM_TYPE_BPF_BPF_EVENT) {
if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
dev_consume_skb_any(skb);
else
dev_kfree_skb_any(skb);
- return;
}
- nfp_ctrl_lock(bpf->app->ctrl);
-
- tag = nfp_bpf_cmsg_get_tag(skb);
- if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
- cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
- tag);
- goto err_unlock;
- }
-
- __skb_queue_tail(&bpf->cmsg_replies, skb);
- wake_up_interruptible_all(&bpf->cmsg_wq);
-
- nfp_ctrl_unlock(bpf->app->ctrl);
-
- return;
-err_unlock:
- nfp_ctrl_unlock(bpf->app->ctrl);
-err_free:
- dev_kfree_skb_any(skb);
+ nfp_ccm_rx(&bpf->ccm, skb);
}
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
+ const struct nfp_ccm_hdr *hdr = data;
struct nfp_app_bpf *bpf = app->priv;
- const struct cmsg_hdr *hdr = data;
if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
return;
}
- if (hdr->type == CMSG_TYPE_BPF_EVENT)
+ if (hdr->type == NFP_CCM_TYPE_BPF_BPF_EVENT)
nfp_bpf_event_output(bpf, data, len);
else
cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 721921bcf120..06c4286bd79e 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/types.h>
+#include "../ccm.h"
/* Kernel's enum bpf_reg_type is not uABI so people may change it breaking
* our FW ABI. In that case we will do translation in the driver.
@@ -52,22 +53,6 @@ struct nfp_bpf_cap_tlv_maps {
/*
* Types defined for map related control messages
*/
-#define CMSG_MAP_ABI_VERSION 1
-
-enum nfp_bpf_cmsg_type {
- CMSG_TYPE_MAP_ALLOC = 1,
- CMSG_TYPE_MAP_FREE = 2,
- CMSG_TYPE_MAP_LOOKUP = 3,
- CMSG_TYPE_MAP_UPDATE = 4,
- CMSG_TYPE_MAP_DELETE = 5,
- CMSG_TYPE_MAP_GETNEXT = 6,
- CMSG_TYPE_MAP_GETFIRST = 7,
- CMSG_TYPE_BPF_EVENT = 8,
- __CMSG_TYPE_MAP_MAX,
-};
-
-#define CMSG_TYPE_MAP_REPLY_BIT 7
-#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
/* BPF ABIv2 fixed-length control message fields */
#define CMSG_MAP_KEY_LW 16
@@ -84,19 +69,13 @@ enum nfp_bpf_cmsg_status {
CMSG_RC_ERR_MAP_E2BIG = 7,
};
-struct cmsg_hdr {
- u8 type;
- u8 ver;
- __be16 tag;
-};
-
struct cmsg_reply_map_simple {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 rc;
};
struct cmsg_req_map_alloc_tbl {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 key_size; /* in bytes */
__be32 value_size; /* in bytes */
__be32 max_entries;
@@ -110,7 +89,7 @@ struct cmsg_reply_map_alloc_tbl {
};
struct cmsg_req_map_free_tbl {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 tid;
};
@@ -120,7 +99,7 @@ struct cmsg_reply_map_free_tbl {
};
struct cmsg_req_map_op {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 tid;
__be32 count;
__be32 flags;
@@ -135,7 +114,7 @@ struct cmsg_reply_map_op {
};
struct cmsg_bpf_event {
- struct cmsg_hdr hdr;
+ struct nfp_ccm_hdr hdr;
__be32 cpu_id;
__be64 map_ptr;
__be32 data_size;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 275de9f4c61c..9c136da25221 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -442,14 +442,16 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app;
app->priv = bpf;
- skb_queue_head_init(&bpf->cmsg_replies);
- init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list);
- err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+ err = nfp_ccm_init(&bpf->ccm, app);
if (err)
goto err_free_bpf;
+ err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
+ if (err)
+ goto err_clean_ccm;
+
nfp_bpf_init_capabilities(bpf);
err = nfp_bpf_parse_capabilities(app);
@@ -474,6 +476,8 @@ static int nfp_bpf_init(struct nfp_app *app)
err_free_neutral_maps:
rhashtable_destroy(&bpf->maps_neutral);
+err_clean_ccm:
+ nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
kfree(bpf);
return err;
@@ -484,7 +488,7 @@ static void nfp_bpf_clean(struct nfp_app *app)
struct nfp_app_bpf *bpf = app->priv;
bpf_offload_dev_destroy(bpf->bpf_dev);
- WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
+ nfp_ccm_clean(&bpf->ccm);
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
rhashtable_free_and_destroy(&bpf->maps_neutral,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index b25a48218bcf..e54d1ac84df2 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/wait.h>
+#include "../ccm.h"
#include "../nfp_asm.h"
#include "fw.h"
@@ -84,16 +85,10 @@ enum pkt_vec {
/**
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
+ * @ccm: common control message handler data
*
* @bpf_dev: BPF offload device handle
*
- * @tag_allocator: bitmap of control message tags in use
- * @tag_alloc_next: next tag bit to allocate
- * @tag_alloc_last: next tag bit to be freed
- *
- * @cmsg_replies: received cmsg replies waiting to be consumed
- * @cmsg_wq: work queue for waiting for cmsg replies
- *
* @cmsg_key_sz: size of key in cmsg element array
* @cmsg_val_sz: size of value in cmsg element array
*
@@ -132,16 +127,10 @@ enum pkt_vec {
*/
struct nfp_app_bpf {
struct nfp_app *app;
+ struct nfp_ccm ccm;
struct bpf_offload_dev *bpf_dev;
- DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
- u16 tag_alloc_next;
- u16 tag_alloc_last;
-
- struct sk_buff_head cmsg_replies;
- struct wait_queue_head cmsg_wq;
-
unsigned int cmsg_key_sz;
unsigned int cmsg_val_sz;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 15dce97650a5..39c9fec222b4 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -22,6 +22,7 @@
#include <net/tc_act/tc_mirred.h>
#include "main.h"
+#include "../ccm.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
@@ -452,7 +453,7 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
return -EINVAL;
- if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
+ if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
return -EINVAL;
rcu_read_lock();
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.c b/drivers/net/ethernet/netronome/nfp/ccm.c
new file mode 100644
index 000000000000..94476e41e261
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#include <linux/bitops.h>
+
+#include "ccm.h"
+#include "nfp_app.h"
+#include "nfp_net.h"
+
+#define NFP_CCM_TYPE_REPLY_BIT 7
+#define __NFP_CCM_REPLY(req) (BIT(NFP_CCM_TYPE_REPLY_BIT) | (req))
+
+#define ccm_warn(app, msg...) nn_dp_warn(&(app)->ctrl->dp, msg)
+
+#define NFP_CCM_TAG_ALLOC_SPAN (U16_MAX / 4)
+
+static bool nfp_ccm_all_tags_busy(struct nfp_ccm *ccm)
+{
+ u16 used_tags;
+
+ used_tags = ccm->tag_alloc_next - ccm->tag_alloc_last;
+
+ return used_tags > NFP_CCM_TAG_ALLOC_SPAN;
+}
+
+static int nfp_ccm_alloc_tag(struct nfp_ccm *ccm)
+{
+ /* CCM is for FW communication which is request-reply. To make sure
+ * we don't reuse the message ID too early after timeout - limit the
+ * number of requests in flight.
+ */
+ if (unlikely(nfp_ccm_all_tags_busy(ccm))) {
+ ccm_warn(ccm->app, "all FW request contexts busy!\n");
+ return -EAGAIN;
+ }
+
+ WARN_ON(__test_and_set_bit(ccm->tag_alloc_next, ccm->tag_allocator));
+ return ccm->tag_alloc_next++;
+}
+
+static void nfp_ccm_free_tag(struct nfp_ccm *ccm, u16 tag)
+{
+ WARN_ON(!__test_and_clear_bit(tag, ccm->tag_allocator));
+
+ while (!test_bit(ccm->tag_alloc_last, ccm->tag_allocator) &&
+ ccm->tag_alloc_last != ccm->tag_alloc_next)
+ ccm->tag_alloc_last++;
+}
+
+static struct sk_buff *__nfp_ccm_reply(struct nfp_ccm *ccm, u16 tag)
+{
+ unsigned int msg_tag;
+ struct sk_buff *skb;
+
+ skb_queue_walk(&ccm->replies, skb) {
+ msg_tag = nfp_ccm_get_tag(skb);
+ if (msg_tag == tag) {
+ nfp_ccm_free_tag(ccm, tag);
+ __skb_unlink(skb, &ccm->replies);
+ return skb;
+ }
+ }
+
+ return NULL;
+}
+
+static struct sk_buff *
+nfp_ccm_reply(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(app->ctrl);
+ skb = __nfp_ccm_reply(ccm, tag);
+ nfp_ctrl_unlock(app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_reply_drop_tag(struct nfp_ccm *ccm, struct nfp_app *app, u16 tag)
+{
+ struct sk_buff *skb;
+
+ nfp_ctrl_lock(app->ctrl);
+ skb = __nfp_ccm_reply(ccm, tag);
+ if (!skb)
+ nfp_ccm_free_tag(ccm, tag);
+ nfp_ctrl_unlock(app->ctrl);
+
+ return skb;
+}
+
+static struct sk_buff *
+nfp_ccm_wait_reply(struct nfp_ccm *ccm, struct nfp_app *app,
+ enum nfp_ccm_type type, int tag)
+{
+ struct sk_buff *skb;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_ccm_reply(ccm, app, tag);
+ if (skb)
+ return skb;
+ }
+
+ err = wait_event_interruptible_timeout(ccm->wq,
+ skb = nfp_ccm_reply(ccm, app,
+ tag),
+ msecs_to_jiffies(5000));
+ /* We didn't get a response - try one last time and atomically drop
+ * the tag even if no response is matched.
+ */
+ if (!skb)
+ skb = nfp_ccm_reply_drop_tag(ccm, app, tag);
+ if (err < 0) {
+ ccm_warn(app, "%s waiting for response to 0x%02x: %d\n",
+ err == ERESTARTSYS ? "interrupted" : "error",
+ type, err);
+ return ERR_PTR(err);
+ }
+ if (!skb) {
+ ccm_warn(app, "timeout waiting for response to 0x%02x\n", type);
+ return ERR_PTR(-ETIMEDOUT);
+ }
+
+ return skb;
+}
+
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int reply_size)
+{
+ struct nfp_app *app = ccm->app;
+ struct nfp_ccm_hdr *hdr;
+ int reply_type, tag;
+
+ nfp_ctrl_lock(app->ctrl);
+ tag = nfp_ccm_alloc_tag(ccm);
+ if (tag < 0) {
+ nfp_ctrl_unlock(app->ctrl);
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(tag);
+ }
+
+ hdr = (void *)skb->data;
+ hdr->ver = NFP_CCM_ABI_VERSION;
+ hdr->type = type;
+ hdr->tag = cpu_to_be16(tag);
+
+ __nfp_app_ctrl_tx(app, skb);
+
+ nfp_ctrl_unlock(app->ctrl);
+
+ skb = nfp_ccm_wait_reply(ccm, app, type, tag);
+ if (IS_ERR(skb))
+ return skb;
+
+ reply_type = nfp_ccm_get_type(skb);
+ if (reply_type != __NFP_CCM_REPLY(type)) {
+ ccm_warn(app, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
+ reply_type, __NFP_CCM_REPLY(type));
+ goto err_free;
+ }
+ /* 0 reply_size means caller will do the validation */
+ if (reply_size && skb->len != reply_size) {
+ ccm_warn(app, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
+ type, skb->len, reply_size);
+ goto err_free;
+ }
+
+ return skb;
+err_free:
+ dev_kfree_skb_any(skb);
+ return ERR_PTR(-EIO);
+}
+
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb)
+{
+ struct nfp_app *app = ccm->app;
+ unsigned int tag;
+
+ if (unlikely(skb->len < sizeof(struct nfp_ccm_hdr))) {
+ ccm_warn(app, "cmsg drop - too short %d!\n", skb->len);
+ goto err_free;
+ }
+
+ nfp_ctrl_lock(app->ctrl);
+
+ tag = nfp_ccm_get_tag(skb);
+ if (unlikely(!test_bit(tag, ccm->tag_allocator))) {
+ ccm_warn(app, "cmsg drop - no one is waiting for tag %u!\n",
+ tag);
+ goto err_unlock;
+ }
+
+ __skb_queue_tail(&ccm->replies, skb);
+ wake_up_interruptible_all(&ccm->wq);
+
+ nfp_ctrl_unlock(app->ctrl);
+ return;
+
+err_unlock:
+ nfp_ctrl_unlock(app->ctrl);
+err_free:
+ dev_kfree_skb_any(skb);
+}
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app)
+{
+ ccm->app = app;
+ skb_queue_head_init(&ccm->replies);
+ init_waitqueue_head(&ccm->wq);
+ return 0;
+}
+
+void nfp_ccm_clean(struct nfp_ccm *ccm)
+{
+ WARN_ON(!skb_queue_empty(&ccm->replies));
+}
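
Reply matching in nfp_ccm_communicate() keys off a single bit: __NFP_CCM_REPLY() sets bit 7 of the request type, so an NFP_CCM_TYPE_BPF_MAP_ALLOC request (type 0x01), for example, must come back as type 0x81. A one-line illustration, assuming the definitions above:

        /* Illustration only: expected reply type for a MAP_ALLOC request. */
        u8 expected = BIT(NFP_CCM_TYPE_REPLY_BIT) | NFP_CCM_TYPE_BPF_MAP_ALLOC; /* 0x81 */
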
diff --git a/drivers/net/ethernet/netronome/nfp/ccm.h b/drivers/net/ethernet/netronome/nfp/ccm.h
new file mode 100644
index 000000000000..e2fe4b867958
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/ccm.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2016-2019 Netronome Systems, Inc. */
+
+#ifndef NFP_CCM_H
+#define NFP_CCM_H 1
+
+#include <linux/bitmap.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+
+struct nfp_app;
+
+/* Firmware ABI */
+
+enum nfp_ccm_type {
+ NFP_CCM_TYPE_BPF_MAP_ALLOC = 1,
+ NFP_CCM_TYPE_BPF_MAP_FREE = 2,
+ NFP_CCM_TYPE_BPF_MAP_LOOKUP = 3,
+ NFP_CCM_TYPE_BPF_MAP_UPDATE = 4,
+ NFP_CCM_TYPE_BPF_MAP_DELETE = 5,
+ NFP_CCM_TYPE_BPF_MAP_GETNEXT = 6,
+ NFP_CCM_TYPE_BPF_MAP_GETFIRST = 7,
+ NFP_CCM_TYPE_BPF_BPF_EVENT = 8,
+ __NFP_CCM_TYPE_MAX,
+};
+
+#define NFP_CCM_ABI_VERSION 1
+
+struct nfp_ccm_hdr {
+ u8 type;
+ u8 ver;
+ __be16 tag;
+};
+
+static inline u8 nfp_ccm_get_type(struct sk_buff *skb)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ hdr = (struct nfp_ccm_hdr *)skb->data;
+
+ return hdr->type;
+}
+
+static inline unsigned int nfp_ccm_get_tag(struct sk_buff *skb)
+{
+ struct nfp_ccm_hdr *hdr;
+
+ hdr = (struct nfp_ccm_hdr *)skb->data;
+
+ return be16_to_cpu(hdr->tag);
+}
+
+/* Implementation */
+
+/**
+ * struct nfp_ccm - common control message handling
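+ * @app: backpointer to the app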
+ * @tag_allocator: bitmap of control message tags in use
+ * @tag_alloc_next: next tag bit to allocate
+ * @tag_alloc_last: next tag bit to be freed
+ *
+ * @replies: received cmsg replies waiting to be consumed
+ * @wq: work queue for waiting for cmsg replies
+ */
+struct nfp_ccm {
+ struct nfp_app *app;
+
+ DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
+ u16 tag_alloc_next;
+ u16 tag_alloc_last;
+
+ struct sk_buff_head replies;
+ struct wait_queue_head wq;
+};
+
+int nfp_ccm_init(struct nfp_ccm *ccm, struct nfp_app *app);
+void nfp_ccm_clean(struct nfp_ccm *ccm);
+void nfp_ccm_rx(struct nfp_ccm *ccm, struct sk_buff *skb);
+struct sk_buff *
+nfp_ccm_communicate(struct nfp_ccm *ccm, struct sk_buff *skb,
+ enum nfp_ccm_type type, unsigned int reply_size);
+#endif
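
Taken together with the BPF hunks above, the intended usage pattern for this helper is: embed a struct nfp_ccm in the app's private data, call nfp_ccm_init() at app init time and nfp_ccm_clean() on teardown, feed control-message replies to nfp_ccm_rx() from the app's RX path, and issue request/reply exchanges through nfp_ccm_communicate(). A condensed sketch of that lifecycle, mirroring the bpf/main.c and bpf/cmsg.c changes; the example_* names are hypothetical and error handling is trimmed:

        /* Sketch of a CCM client, modelled on the BPF app usage shown above. */
        struct example_app_priv {
                struct nfp_app *app;
                struct nfp_ccm ccm;
        };

        static int example_app_init(struct nfp_app *app, struct example_app_priv *priv)
        {
                return nfp_ccm_init(&priv->ccm, app);   /* paired with nfp_ccm_clean() */
        }

        static void example_app_ctrl_msg_rx(struct example_app_priv *priv,
                                            struct sk_buff *skb)
        {
                /* Hand replies to the CCM core so waiters get woken up. */
                nfp_ccm_rx(&priv->ccm, skb);
        }

        static int example_app_request(struct example_app_priv *priv,
                                       struct sk_buff *req, unsigned int reply_size)
        {
                struct sk_buff *reply;

                /* Tags the request, transmits it and waits for the matching reply. */
                reply = nfp_ccm_communicate(&priv->ccm, req,
                                            NFP_CCM_TYPE_BPF_MAP_ALLOC, reply_size);
                if (IS_ERR(reply))
                        return PTR_ERR(reply);

                dev_consume_skb_any(reply);
                return 0;
        }
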
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index e336f6ee94f5..c56e31d9f8a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -160,9 +160,9 @@ nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
switch (tun->key.tp_dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
/* FALLTHROUGH */
@@ -582,60 +582,23 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
}
}
-static int
-nfp_fl_pedit(const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
- char *nfp_action, int *a_len, u32 *csum_updated)
-{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+struct nfp_flower_pedit_acts {
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
- enum flow_action_mangle_base htype;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
+};
+
+static int
+nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+ int *a_len, struct nfp_flower_pedit_acts *set_act,
+ u32 *csum_updated)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
- u32 offset;
- int err;
-
- memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
- memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
- memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
- memset(&set_ip6_src, 0, sizeof(set_ip6_src));
- memset(&set_ip_addr, 0, sizeof(set_ip_addr));
- memset(&set_tport, 0, sizeof(set_tport));
- memset(&set_eth, 0, sizeof(set_eth));
-
- htype = act->mangle.htype;
- offset = act->mangle.offset;
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- err = nfp_fl_set_eth(act, offset, &set_eth);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
- &set_ip_ttl_tos);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
- &set_ip6_src, &set_ip6_tc_hl_fl);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (err)
- return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -644,77 +607,82 @@ nfp_fl_pedit(const struct flow_action_entry *act,
ip_proto = match.key->ip_proto;
}
- if (set_eth.head.len_lw) {
- act_size = sizeof(set_eth);
- memcpy(nfp_action, &set_eth, act_size);
+ if (set_act->set_eth.head.len_lw) {
+ act_size = sizeof(set_act->set_eth);
+ memcpy(nfp_action, &set_act->set_eth, act_size);
*a_len += act_size;
}
- if (set_ip_ttl_tos.head.len_lw) {
+
+ if (set_act->set_ip_ttl_tos.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_ttl_tos);
- memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ act_size = sizeof(set_act->set_ip_ttl_tos);
+ memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip_addr.head.len_lw) {
+
+ if (set_act->set_ip_addr.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_addr);
- memcpy(nfp_action, &set_ip_addr, act_size);
+ act_size = sizeof(set_act->set_ip_addr);
+ memcpy(nfp_action, &set_act->set_ip_addr, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_tc_hl_fl.head.len_lw) {
+
+ if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_tc_hl_fl);
- memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+
+ if (set_act->set_ip6_dst.head.len_lw &&
+ set_act->set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
- act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+ &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw) {
+ } else if (set_act->set_ip6_dst.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(nfp_action, &set_ip6_dst, act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_src.head.len_lw) {
+ } else if (set_act->set_ip6_src.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_tport.head.len_lw) {
+ if (set_act->set_tport.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_tport);
- memcpy(nfp_action, &set_tport, act_size);
+ act_size = sizeof(set_act->set_tport);
+ memcpy(nfp_action, &set_act->set_tport, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
@@ -725,7 +693,40 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
static int
-nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
+nfp_fl_pedit(const struct flow_action_entry *act,
+ struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+{
+ enum flow_action_mangle_base htype;
+ u32 offset;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
+ &set_act->set_ip_ttl_tos);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
+ &set_act->set_ip6_src,
+ &set_act->set_ip6_tc_hl_fl);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_output_action(struct nfp_app *app,
+ const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -775,7 +776,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated,
+ struct nfp_flower_pedit_acts *set_act)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -860,7 +862,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- a_len, csum_updated))
+ a_len, csum_updated, set_act))
return -EOPNOTSUPP;
break;
case FLOW_ACTION_CSUM:
@@ -880,12 +882,49 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
}
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry prev_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == 0)
+ return true;
+
+ prev_act = flow_act->entries[current_act_idx - 1];
+
+ return prev_act.id != FLOW_ACTION_MANGLE;
+}
+
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry next_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == flow_act->num_entries)
+ return true;
+
+ next_act = flow_act->entries[current_act_idx + 1];
+
+ return next_act.id != FLOW_ACTION_MANGLE;
+}
+
int nfp_flower_compile_action(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+ struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
u32 csum_updated = 0;
@@ -899,12 +938,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
out_cnt = 0;
flow_action_for_each(i, act, &flow->rule->action) {
+ if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
- &out_cnt, &csum_updated);
+ &out_cnt, &csum_updated, &set_act);
if (err)
return err;
act_cnt++;
+ if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+ nfp_fl_commit_mangle(flow,
+ &nfp_flow->action_data[act_len],
+ &act_len, &set_act, &csum_updated);
}
/* We optimise when the action list is small, this can unfortunately
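
The net effect of the pedit refactor above is that consecutive FLOW_ACTION_MANGLE entries are coalesced into a single hardware commit: set_act is zeroed when a mangle run begins, each nfp_fl_pedit() call only accumulates into it, and nfp_fl_commit_mangle() fires once when the run ends. An illustrative trace, assuming an action list of [mangle(eth), mangle(ip4), output]:

        /* Illustrative walk of nfp_flower_compile_action() over that list:
         *   i = 0: mangle run starts  -> memset(&set_act, 0, sizeof(set_act))
         *   i = 0, 1: nfp_fl_pedit() accumulates the eth and ip4 fields in set_act
         *   i = 1: run ends           -> nfp_fl_commit_mangle(..., &set_act, ...)
         *   i = 2: the output action is handled by nfp_flower_loop_action() as before
         */
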
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index cf9e1118ee8f..d5bbe3d6048b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -159,7 +159,7 @@ nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
rtnl_lock();
rcu_read_lock();
- netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();
if (!netdev) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -192,7 +192,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
msg = nfp_flower_cmsg_get_data(skb);
rcu_read_lock();
- exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
+ exists = !!nfp_app_dev_get(app, be32_to_cpu(msg->portnum), NULL);
rcu_read_unlock();
if (!exists) {
nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
@@ -205,6 +205,50 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
}
static void
+nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
+{
+ unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
+ struct nfp_flower_cmsg_merge_hint *msg;
+ struct nfp_fl_payload *sub_flows[2];
+ int err, i, flow_cnt;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ /* msg->count starts at 0 and always assumes at least 1 entry. */
+ flow_cnt = msg->count + 1;
+
+ if (msg_len < struct_size(msg, flow, flow_cnt)) {
+ nfp_flower_cmsg_warn(app, "Merge hint ctrl msg too short - %d bytes but expect %zd\n",
+ msg_len, struct_size(msg, flow, flow_cnt));
+ return;
+ }
+
+ if (flow_cnt != 2) {
+ nfp_flower_cmsg_warn(app, "Merge hint contains %d flows - two are expected\n",
+ flow_cnt);
+ return;
+ }
+
+ rtnl_lock();
+ for (i = 0; i < flow_cnt; i++) {
+ u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
+
+ sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
+ if (!sub_flows[i]) {
+ nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
+ goto err_rtnl_unlock;
+ }
+ }
+
+ err = nfp_flower_merge_offloaded_flows(app, sub_flows[0], sub_flows[1]);
+ /* Only warn on memory fail. Hint veto will not break functionality. */
+ if (err == -ENOMEM)
+ nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
+
+err_rtnl_unlock:
+ rtnl_unlock();
+}
+
+static void
nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_flower_priv *app_priv = app->priv;
@@ -222,12 +266,21 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
nfp_flower_cmsg_portmod_rx(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_MERGE_HINT:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE) {
+ nfp_flower_cmsg_merge_hint_rx(app, skb);
+ break;
+ }
+ goto err_default;
case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
nfp_tunnel_request_route(app, skb);
break;
case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
nfp_tunnel_keep_alive(app, skb);
break;
+ case NFP_FLOWER_CMSG_TYPE_QOS_STATS:
+ nfp_flower_stats_rlim_reply(app, skb);
+ break;
case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
@@ -235,6 +288,7 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
}
/* fall through */
default:
+err_default:
nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
type);
goto out;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 0ed51e79db00..537f7fc19584 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -402,11 +402,13 @@ struct nfp_flower_cmsg_hdr {
/* Types defined for port related control messages */
enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_FLOW_ADD = 0,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD = 1,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL = 2,
NFP_FLOWER_CMSG_TYPE_LAG_CONFIG = 4,
NFP_FLOWER_CMSG_TYPE_PORT_REIFY = 6,
NFP_FLOWER_CMSG_TYPE_MAC_REPR = 7,
NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,
+ NFP_FLOWER_CMSG_TYPE_MERGE_HINT = 9,
NFP_FLOWER_CMSG_TYPE_NO_NEIGH = 10,
NFP_FLOWER_CMSG_TYPE_TUN_MAC = 11,
NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS = 12,
@@ -414,6 +416,9 @@ enum nfp_flower_cmsg_type_port {
NFP_FLOWER_CMSG_TYPE_TUN_IPS = 14,
NFP_FLOWER_CMSG_TYPE_FLOW_STATS = 15,
NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD = 18,
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL = 19,
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS = 20,
NFP_FLOWER_CMSG_TYPE_MAX = 32,
};
@@ -451,6 +456,16 @@ struct nfp_flower_cmsg_portreify {
#define NFP_FLOWER_CMSG_PORTREIFY_INFO_EXIST BIT(0)
+/* NFP_FLOWER_CMSG_TYPE_FLOW_MERGE_HINT */
+struct nfp_flower_cmsg_merge_hint {
+ u8 reserved[3];
+ u8 count;
+ struct {
+ __be32 host_ctx;
+ __be64 host_cookie;
+ } __packed flow[0];
+};
+
enum nfp_flower_cmsg_port_type {
NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
@@ -473,6 +488,13 @@ enum nfp_flower_cmsg_port_vnic_type {
#define NFP_FLOWER_CMSG_PORT_PCIE_Q GENMASK(5, 0)
#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM GENMASK(7, 0)
+static inline u32 nfp_flower_internal_port_get_port_id(u8 internal_port)
+{
+ return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, internal_port) |
+ FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT);
+}
+
static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
{
return FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port) |
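
The length check in nfp_flower_cmsg_merge_hint_rx() above follows directly from this layout: the fixed header is 4 bytes (3 reserved bytes plus the count byte) and each packed {host_ctx, host_cookie} entry is 12 bytes, so a hint carrying the expected two sub-flows must be at least 4 + 2 * 12 = 28 bytes. A sketch of that same arithmetic via struct_size():

        /* Sketch: minimum payload length for a two-flow merge hint. */
        struct nfp_flower_cmsg_merge_hint *msg = nfp_flower_cmsg_get_data(skb);
        size_t need = struct_size(msg, flow, 2);        /* 4 + 2 * 12 = 28 bytes */
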
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 408089133599..eb846133943b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -22,6 +22,9 @@
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
+#define NFP_MIN_INT_PORT_ID 1
+#define NFP_MAX_INT_PORT_ID 256
+
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
return "FLOWER";
@@ -32,6 +35,113 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
+static int
+nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
+ struct net_device *netdev)
+{
+ struct net_device *entry;
+ int i, id = 0;
+
+ rcu_read_lock();
+ idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
+ if (entry == netdev) {
+ id = i;
+ break;
+ }
+ rcu_read_unlock();
+
+ return id;
+}
+
+static int
+nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int id;
+
+ id = nfp_flower_lookup_internal_port_id(priv, netdev);
+ if (id > 0)
+ return id;
+
+ idr_preload(GFP_ATOMIC);
+ spin_lock_bh(&priv->internal_ports.lock);
+ id = idr_alloc(&priv->internal_ports.port_ids, netdev,
+ NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
+ spin_unlock_bh(&priv->internal_ports.lock);
+ idr_preload_end();
+
+ return id;
+}
+
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ int ext_port;
+
+ if (nfp_netdev_is_nfp_repr(netdev)) {
+ return nfp_repr_get_port_id(netdev);
+ } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
+ ext_port = nfp_flower_get_internal_port_id(app, netdev);
+ if (ext_port < 0)
+ return 0;
+
+ return nfp_flower_internal_port_get_port_id(ext_port);
+ }
+
+ return 0;
+}
+
+static struct net_device *
+nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct net_device *netdev;
+
+ rcu_read_lock();
+ netdev = idr_find(&priv->internal_ports.port_ids, port_id);
+ rcu_read_unlock();
+
+ return netdev;
+}
+
+static void
+nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ int id;
+
+ id = nfp_flower_lookup_internal_port_id(priv, netdev);
+ if (!id)
+ return;
+
+ spin_lock_bh(&priv->internal_ports.lock);
+ idr_remove(&priv->internal_ports.port_ids, id);
+ spin_unlock_bh(&priv->internal_ports.lock);
+}
+
+static int
+nfp_flower_internal_port_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ if (event == NETDEV_UNREGISTER &&
+ nfp_flower_internal_port_can_offload(app, netdev))
+ nfp_flower_free_internal_port_id(app, netdev);
+
+ return NOTIFY_OK;
+}
+
+static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
+{
+ spin_lock_init(&priv->internal_ports.lock);
+ idr_init(&priv->internal_ports.port_ids);
+}
+
+static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
+{
+ idr_destroy(&priv->internal_ports.port_ids);
+}
+
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
@@ -119,12 +229,21 @@ nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
}
static struct net_device *
-nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
+nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
enum nfp_repr_type repr_type;
struct nfp_reprs *reprs;
u8 port = 0;
+ /* Check if the port is internal. */
+ if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
+ NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
+ if (redir_egress)
+ *redir_egress = true;
+ port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
+ return nfp_flower_get_netdev_from_internal_port_id(app, port);
+ }
+
repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
if (repr_type > NFP_REPR_TYPE_MAX)
return NULL;
@@ -641,11 +760,33 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
+ /* Tell the firmware that the driver supports flow merging. */
+ err = nfp_rtsym_write_le(app->pf->rtbl,
+ "_abi_flower_merge_hint_enable", 1);
+ if (!err) {
+ app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
+ nfp_flower_internal_port_init(app_priv);
+ } else if (err == -ENOENT) {
+ nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
+ } else {
+ goto err_lag_clean;
+ }
+ } else {
+ nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
+ }
+
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+ nfp_flower_qos_init(app);
+
INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
INIT_LIST_HEAD(&app_priv->non_repr_priv);
return 0;
+err_lag_clean:
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
+ nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
nfp_flower_metadata_cleanup(app);
err_free_app_priv:
@@ -661,9 +802,15 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
+ nfp_flower_qos_cleanup(app);
+
if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
nfp_flower_lag_cleanup(&app_priv->nfp_lag);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
+ nfp_flower_internal_port_cleanup(app_priv);
+
nfp_flower_metadata_cleanup(app);
vfree(app->priv);
app->priv = NULL;
@@ -762,6 +909,10 @@ nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
if (ret & NOTIFY_STOP_MASK)
return ret;
+ ret = nfp_flower_internal_port_event_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
@@ -800,7 +951,7 @@ const struct nfp_app_type app_flower = {
.sriov_disable = nfp_flower_sriov_disable,
.eswitch_mode_get = eswitch_mode_get,
- .repr_get = nfp_flower_repr_get,
+ .dev_get = nfp_flower_dev_get,
.setup_tc = nfp_flower_setup_tc,
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c0945a5fd1a4..40957a8dbfe6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -5,6 +5,7 @@
#define __NFP_FLOWER_H__ 1
#include "cmsg.h"
+#include "../nfp_net.h"
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
@@ -34,14 +35,14 @@ struct nfp_app;
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
-#define NFP_FL_VXLAN_PORT 4789
-#define NFP_FL_GENEVE_PORT 6081
-
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
+#define NFP_FL_FEATS_VF_RLIM BIT(4)
+#define NFP_FL_FEATS_FLOW_MOD BIT(5)
+#define NFP_FL_FEATS_FLOW_MERGE BIT(30)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
@@ -118,6 +119,16 @@ struct nfp_fl_lag {
};
/**
+ * struct nfp_fl_internal_ports - Flower APP priv data for additional ports
+ * @port_ids: Assignment of ids to any additional ports
+ * @lock: Lock for extra ports list
+ */
+struct nfp_fl_internal_ports {
+ struct idr port_ids;
+ spinlock_t lock;
+};
+
+/**
* struct nfp_flower_priv - Flower APP per-vNIC priv data
* @app: Back pointer to app
* @nn: Pointer to vNIC
@@ -131,6 +142,7 @@ struct nfp_fl_lag {
* @flow_table: Hash table used to store flower rules
* @stats: Stored stats updates for flower rules
* @stats_lock: Lock for flower rule stats updates
+ * @stats_ctx_table: Hash table mapping a stats context to its flow rule
* @cmsg_work: Workqueue for control messages processing
* @cmsg_skbs_high: List of higher priority skbs for control message
* processing
@@ -146,6 +158,10 @@ struct nfp_fl_lag {
* @non_repr_priv: List of offloaded non-repr ports and their priv data
* @active_mem_unit: Current active memory unit for flower rules
* @total_mem_units: Total number of available memory units for flower rules
+ * @internal_ports: Internal port ids used in offloaded rules
+ * @qos_stats_work: Workqueue for qos stats processing
+ * @qos_rate_limiters: Current active qos rate limiters
+ * @qos_stats_lock: Lock on qos stats updates
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,6 +176,7 @@ struct nfp_flower_priv {
struct rhashtable flow_table;
struct nfp_fl_stats *stats;
spinlock_t stats_lock; /* lock stats */
+ struct rhashtable stats_ctx_table;
struct work_struct cmsg_work;
struct sk_buff_head cmsg_skbs_high;
struct sk_buff_head cmsg_skbs_low;
@@ -172,6 +189,24 @@ struct nfp_flower_priv {
struct list_head non_repr_priv;
unsigned int active_mem_unit;
unsigned int total_mem_units;
+ struct nfp_fl_internal_ports internal_ports;
+ struct delayed_work qos_stats_work;
+ unsigned int qos_rate_limiters;
+ spinlock_t qos_stats_lock; /* Protect the qos stats */
+};
+
+/**
+ * struct nfp_fl_qos - Flower APP priv data for quality of service
+ * @netdev_port_id: NFP port number of repr with qos info
+ * @curr_stats: Currently stored stats updates for qos info
+ * @prev_stats: Previously stored updates for qos info
+ * @last_update: Stored time when last stats were updated
+ */
+struct nfp_fl_qos {
+ u32 netdev_port_id;
+ struct nfp_stat_pair curr_stats;
+ struct nfp_stat_pair prev_stats;
+ u64 last_update;
};
/**
@@ -180,14 +215,18 @@ struct nfp_flower_priv {
* @lag_port_flags: Extended port flags to record lag state of repr
* @mac_offloaded: Flag indicating a MAC address is offloaded for repr
* @offloaded_mac_addr: MAC address that has been offloaded for repr
+ * @block_shared: Flag indicating if offload applies to shared blocks
* @mac_list: List entry of reprs that share the same offloaded MAC
+ * @qos_table: Stored info on filters implementing qos
*/
struct nfp_flower_repr_priv {
struct nfp_repr *nfp_repr;
unsigned long lag_port_flags;
bool mac_offloaded;
u8 offloaded_mac_addr[ETH_ALEN];
+ bool block_shared;
struct list_head mac_list;
+ struct nfp_fl_qos qos_table;
};
/**
@@ -239,6 +278,25 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
+ struct list_head linked_flows;
+ bool in_hw;
+};
+
+struct nfp_fl_payload_link {
+ /* A link contains a pointer to a merge flow and an associated sub_flow.
+ * Each merge flow will feature in 2 links to its underlying sub_flows.
+ * A sub_flow will have at least 1 link to a merge flow or more if it
+ * has been used to create multiple merge flows.
+ *
+ * For a merge flow, 'linked_flows' in its nfp_fl_payload struct lists
+ * all links to sub_flows (sub_flow.flow) via merge.list.
+ * For a sub_flow, 'linked_flows' gives all links to merge flows it has
+ * formed (merge_flow.flow) via sub_flow.list.
+ */
+ struct {
+ struct list_head list;
+ struct nfp_fl_payload *flow;
+ } merge_flow, sub_flow;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -250,12 +308,40 @@ struct nfp_fl_stats_frame {
__be64 stats_cookie;
};
+static inline bool
+nfp_flower_internal_port_can_offload(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_priv *app_priv = app->priv;
+
+ if (!(app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE))
+ return false;
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+
+ return false;
+}
+
+/* The address of the merged flow acts as its cookie.
+ * Cookies supplied to us by TC flower are also addresses to allocated
+ * memory and thus this scheme should not generate any collisions.
+ */
+static inline bool nfp_flower_is_merge_flow(struct nfp_fl_payload *flow_pay)
+{
+ return flow_pay->tc_flower_cookie == (unsigned long)flow_pay;
+}
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_ctx_split);
void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2);
int nfp_flower_compile_flow_match(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
@@ -270,6 +356,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev);
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+ struct nfp_fl_payload *nfp_flow);
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow);
@@ -277,6 +365,8 @@ struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
struct net_device *netdev);
struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id);
+struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
@@ -302,6 +392,11 @@ int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+void nfp_flower_qos_init(struct nfp_app *app);
+void nfp_flower_qos_cleanup(struct nfp_app *app);
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow);
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
struct net_device *netdev,
unsigned long event);
@@ -314,4 +409,6 @@ void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
+u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
+ struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 9b8b843d0340..bfa4bf34911d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -326,13 +326,12 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- u32 cmsg_port = 0;
+ u32 port_id;
int err;
u8 *ext;
u8 *msk;
- if (nfp_netdev_is_nfp_repr(netdev))
- cmsg_port = nfp_repr_get_port_id(netdev);
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -358,13 +357,13 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- cmsg_port, false, tun_type);
+ port_id, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- cmsg_port, true, tun_type);
+ port_id, true, tun_type);
if (err)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 492837b852b6..3d326efdc814 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -24,6 +24,18 @@ struct nfp_fl_flow_table_cmp_arg {
unsigned long cookie;
};
+struct nfp_fl_stats_ctx_to_flow {
+ struct rhash_head ht_node;
+ u32 stats_cxt;
+ struct nfp_fl_payload *flow;
+};
+
+static const struct rhashtable_params stats_ctx_table_params = {
+ .key_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, stats_cxt),
+ .head_offset = offsetof(struct nfp_fl_stats_ctx_to_flow, ht_node),
+ .key_len = sizeof(u32),
+};
+
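The stats_ctx_table added above maps a 32-bit firmware stats context id back to the flow payload that owns it, which is what lets merge hints (delivered by context id) be resolved to flows. A minimal userspace sketch of the same mapping idea, using a toy linear-scan table instead of the kernel's rhashtable; all names here are illustrative, not part of the driver:

#include <stddef.h>
#include <stdint.h>

struct flow;				/* opaque flow payload */

struct ctx_to_flow {
	uint32_t stats_ctx;		/* key: firmware stats context id */
	struct flow *flow;		/* value: owning flow */
};

/* Toy lookup: linear scan over a small table keyed by stats context id. */
static struct flow *ctx_table_lookup(const struct ctx_to_flow *tbl, size_t n,
				     uint32_t stats_ctx)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].flow && tbl[i].stats_ctx == stats_ctx)
			return tbl[i].flow;
	return NULL;
}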
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
{
struct nfp_flower_priv *priv = app->priv;
@@ -264,9 +276,6 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len,
if (!mask_entry)
return false;
- if (meta_flags)
- *meta_flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
-
*mask_id = mask_entry->mask_id;
mask_entry->ref_cnt--;
if (!mask_entry->ref_cnt) {
@@ -285,25 +294,42 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow,
struct net_device *netdev)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *check_entry;
u8 new_mask_id;
u32 stats_cxt;
+ int err;
- if (nfp_get_stats_entry(app, &stats_cxt))
- return -ENOENT;
+ err = nfp_get_stats_entry(app, &stats_cxt);
+ if (err)
+ return err;
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
nfp_flow->ingress_dev = netdev;
+ ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL);
+ if (!ctx_entry) {
+ err = -ENOMEM;
+ goto err_release_stats;
+ }
+
+ ctx_entry->stats_cxt = stats_cxt;
+ ctx_entry->flow = nfp_flow;
+
+ if (rhashtable_insert_fast(&priv->stats_ctx_table, &ctx_entry->ht_node,
+ stats_ctx_table_params)) {
+ err = -ENOMEM;
+ goto err_free_ctx_entry;
+ }
+
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len,
&nfp_flow->meta.flags, &new_mask_id)) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
- return -ENOENT;
+ err = -ENOENT;
+ goto err_remove_rhash;
}
nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
@@ -317,43 +343,82 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
- if (nfp_release_stats_entry(app, stats_cxt))
- return -EINVAL;
-
- if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
- nfp_flow->meta.mask_len,
- NULL, &new_mask_id))
- return -EINVAL;
-
- return -EEXIST;
+ err = -EEXIST;
+ goto err_remove_mask;
}
return 0;
+
+err_remove_mask:
+ nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
+ NULL, &new_mask_id);
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ &ctx_entry->ht_node,
+ stats_ctx_table_params));
+err_free_ctx_entry:
+ kfree(ctx_entry);
+err_release_stats:
+ nfp_release_stats_entry(app, stats_cxt);
+
+ return err;
+}
+
+void __nfp_modify_flow_metadata(struct nfp_flower_priv *priv,
+ struct nfp_fl_payload *nfp_flow)
+{
+ nfp_flow->meta.flags &= ~NFP_FL_META_FLAG_MANAGE_MASK;
+ nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
+ priv->flower_version++;
}
int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *nfp_flow)
{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
struct nfp_flower_priv *priv = app->priv;
u8 new_mask_id = 0;
u32 temp_ctx_id;
+ __nfp_modify_flow_metadata(priv, nfp_flow);
+
nfp_check_mask_remove(app, nfp_flow->mask_data,
nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
&new_mask_id);
- nfp_flow->meta.flow_version = cpu_to_be64(priv->flower_version);
- priv->flower_version++;
-
/* Update flow payload with mask ids. */
nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
- /* Release the stats ctx id. */
+ /* Release the stats ctx id and ctx to flow table entry. */
temp_ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
+ ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &temp_ctx_id,
+ stats_ctx_table_params);
+ if (!ctx_entry)
+ return -ENOENT;
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
+ &ctx_entry->ht_node,
+ stats_ctx_table_params));
+ kfree(ctx_entry);
+
return nfp_release_stats_entry(app, temp_ctx_id);
}
+struct nfp_fl_payload *
+nfp_flower_get_fl_payload_from_ctx(struct nfp_app *app, u32 ctx_id)
+{
+ struct nfp_fl_stats_ctx_to_flow *ctx_entry;
+ struct nfp_flower_priv *priv = app->priv;
+
+ ctx_entry = rhashtable_lookup_fast(&priv->stats_ctx_table, &ctx_id,
+ stats_ctx_table_params);
+ if (!ctx_entry)
+ return NULL;
+
+ return ctx_entry->flow;
+}
+
static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -403,6 +468,10 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
return err;
+ err = rhashtable_init(&priv->stats_ctx_table, &stats_ctx_table_params);
+ if (err)
+ goto err_free_flow_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -410,7 +479,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_flow_table;
+ goto err_free_stats_ctx_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -447,6 +516,8 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_stats_ctx_table:
+ rhashtable_destroy(&priv->stats_ctx_table);
err_free_flow_table:
rhashtable_destroy(&priv->flow_table);
return -ENOMEM;
@@ -461,6 +532,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
rhashtable_free_and_destroy(&priv->flow_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->stats_ctx_table,
+ nfp_check_rhashtable_empty, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 450d7296fd57..1fbfeb43c538 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -55,6 +55,28 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+#define NFP_FLOWER_MERGE_FIELDS \
+ (NFP_FLOWER_LAYER_PORT | \
+ NFP_FLOWER_LAYER_MAC | \
+ NFP_FLOWER_LAYER_TP | \
+ NFP_FLOWER_LAYER_IPV4 | \
+ NFP_FLOWER_LAYER_IPV6)
+
+struct nfp_flower_merge_check {
+ union {
+ struct {
+ __be16 tci;
+ struct nfp_flower_mac_mpls l2;
+ struct nfp_flower_tp_ports l4;
+ union {
+ struct nfp_flower_ipv4 ipv4;
+ struct nfp_flower_ipv6 ipv6;
+ };
+ };
+ unsigned long vals[8];
+ };
+};
+
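The union in nfp_flower_merge_check overlays the typed match fields with an array of unsigned longs so the whole structure can later be handed to the bitmap helpers in nfp_flower_can_merge(). A standalone C sketch of the same trick, with hypothetical field names and a compile-time check that the bitmap view covers the typed view:

#include <stdio.h>
#include <string.h>

struct toy_merge_check {
	union {
		struct {
			unsigned short tci;
			unsigned char  mac[12];
			unsigned int   ports;
		};
		unsigned long vals[8];	/* bitmap view of the same bytes */
	};
};

/* The bitmap view must be large enough to cover every typed field. */
_Static_assert(sizeof(struct toy_merge_check) <=
	       sizeof(((struct toy_merge_check *)0)->vals),
	       "typed fields must fit in the vals[] view");

int main(void)
{
	struct toy_merge_check m;

	memset(&m, 0, sizeof(m));
	m.tci = 0xffff;			/* written through the typed view ... */
	printf("%lx\n", m.vals[0]);	/* ... observed through the bitmap view */
	return 0;
}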
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
u8 mtype)
@@ -195,7 +217,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
switch (enc_ports.key->dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
@@ -203,7 +225,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (enc_op.key)
return -EOPNOTSUPP;
break;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
return -EOPNOTSUPP;
*tun_type = NFP_FL_TUNNEL_GENEVE;
@@ -326,7 +348,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
break;
case cpu_to_be16(ETH_P_IPV6):
- key_layer |= NFP_FLOWER_LAYER_IPV6;
+ key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -376,6 +398,8 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
+ INIT_LIST_HEAD(&flow_pay->linked_flows);
+ flow_pay->in_hw = false;
return flow_pay;
@@ -388,6 +412,447 @@ err_free_flow:
return NULL;
}
+static int
+nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
+ struct nfp_flower_merge_check *merge,
+ u8 *last_act_id, int *act_out)
+{
+ struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
+ struct nfp_fl_set_ip4_addrs *ipv4_add;
+ struct nfp_fl_set_ipv6_addr *ipv6_add;
+ struct nfp_fl_push_vlan *push_vlan;
+ struct nfp_fl_set_tport *tport;
+ struct nfp_fl_set_eth *eth;
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+ u8 act_id = 0;
+ u8 *ports;
+ int i;
+
+ while (act_off < flow->meta.act_len) {
+ a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
+ act_id = a->jump_id;
+
+ switch (act_id) {
+ case NFP_FL_ACTION_OPCODE_OUTPUT:
+ if (act_out)
+ (*act_out)++;
+ break;
+ case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
+ push_vlan = (struct nfp_fl_push_vlan *)a;
+ if (push_vlan->vlan_tci)
+ merge->tci = cpu_to_be16(0xffff);
+ break;
+ case NFP_FL_ACTION_OPCODE_POP_VLAN:
+ merge->tci = cpu_to_be16(0);
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL:
+ /* New tunnel header means l2 to l4 can be matched. */
+ eth_broadcast_addr(&merge->l2.mac_dst[0]);
+ eth_broadcast_addr(&merge->l2.mac_src[0]);
+ memset(&merge->l4, 0xff,
+ sizeof(struct nfp_flower_tp_ports));
+ memset(&merge->ipv4, 0xff,
+ sizeof(struct nfp_flower_ipv4));
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
+ eth = (struct nfp_fl_set_eth *)a;
+ for (i = 0; i < ETH_ALEN; i++)
+ merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
+ for (i = 0; i < ETH_ALEN; i++)
+ merge->l2.mac_src[i] |=
+ eth->eth_addr_mask[ETH_ALEN + i];
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
+ ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
+ merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
+ merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
+ ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
+ merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
+ merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
+ ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+ for (i = 0; i < 4; i++)
+ merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
+ ipv6_add->ipv6[i].mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
+ ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+ for (i = 0; i < 4; i++)
+ merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
+ ipv6_add->ipv6[i].mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
+ ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
+ merge->ipv6.ip_ext.ttl |=
+ ipv6_tc_hl_fl->ipv6_hop_limit_mask;
+ merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
+ merge->ipv6.ipv6_flow_label_exthdr |=
+ ipv6_tc_hl_fl->ipv6_label_mask;
+ break;
+ case NFP_FL_ACTION_OPCODE_SET_UDP:
+ case NFP_FL_ACTION_OPCODE_SET_TCP:
+ tport = (struct nfp_fl_set_tport *)a;
+ ports = (u8 *)&merge->l4.port_src;
+ for (i = 0; i < 4; i++)
+ ports[i] |= tport->tp_port_mask[i];
+ break;
+ case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ case NFP_FL_ACTION_OPCODE_PRE_LAG:
+ case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ if (last_act_id)
+ *last_act_id = act_id;
+
+ return 0;
+}
+
+static int
+nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
+ struct nfp_flower_merge_check *merge,
+ bool extra_fields)
+{
+ struct nfp_flower_meta_tci *meta_tci;
+ u8 *mask = flow->mask_data;
+ u8 key_layer, match_size;
+
+ memset(merge, 0, sizeof(struct nfp_flower_merge_check));
+
+ meta_tci = (struct nfp_flower_meta_tci *)mask;
+ key_layer = meta_tci->nfp_flow_key_layer;
+
+ if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
+ return -EOPNOTSUPP;
+
+ merge->tci = meta_tci->tci;
+ mask += sizeof(struct nfp_flower_meta_tci);
+
+ if (key_layer & NFP_FLOWER_LAYER_EXT_META)
+ mask += sizeof(struct nfp_flower_ext_meta);
+
+ mask += sizeof(struct nfp_flower_in_port);
+
+ if (key_layer & NFP_FLOWER_LAYER_MAC) {
+ match_size = sizeof(struct nfp_flower_mac_mpls);
+ memcpy(&merge->l2, mask, match_size);
+ mask += match_size;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_TP) {
+ match_size = sizeof(struct nfp_flower_tp_ports);
+ memcpy(&merge->l4, mask, match_size);
+ mask += match_size;
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+ match_size = sizeof(struct nfp_flower_ipv4);
+ memcpy(&merge->ipv4, mask, match_size);
+ }
+
+ if (key_layer & NFP_FLOWER_LAYER_IPV6) {
+ match_size = sizeof(struct nfp_flower_ipv6);
+ memcpy(&merge->ipv6, mask, match_size);
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2)
+{
+ /* Two flows can be merged if sub_flow2 only matches on bits that are
+ * either matched by sub_flow1 or set by a sub_flow1 action. This
+ * ensures that every packet that hits sub_flow1 and recirculates is
+ * guaranteed to hit sub_flow2.
+ */
+ struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
+ int err, act_out = 0;
+ u8 last_act_id = 0;
+
+ err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
+ true);
+ if (err)
+ return err;
+
+ err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
+ false);
+ if (err)
+ return err;
+
+ err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
+ &last_act_id, &act_out);
+ if (err)
+ return err;
+
+ /* Must only be 1 output action and it must be the last in sequence. */
+ if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+ return -EOPNOTSUPP;
+
+ /* Reject merge if sub_flow2 matches on something that is not matched
+ * on or set in an action by sub_flow1.
+ */
+ err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
+ sub_flow1_merge.vals,
+ sizeof(struct nfp_flower_merge_check) * 8);
+ if (err)
+ return -EINVAL;
+
+ return 0;
+}
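The heart of the check above is a subset test: once everything sub_flow1 matches or can set through its actions has been OR'ed together, bitmap_andnot() returns nonzero if sub_flow2 still needs any bit that is not covered, and the merge is rejected. A hedged userspace equivalent over plain byte arrays (names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Return true if every bit set in "needed" is also set in "covered",
 * i.e. needed & ~covered == 0 over the whole array. Here "covered" plays
 * the role of sub_flow1's match mask OR'ed with its action set-masks,
 * and "needed" the role of sub_flow2's match mask.
 */
static bool mask_is_subset(const uint8_t *needed, const uint8_t *covered,
			   size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		if (needed[i] & ~covered[i])
			return false;
	return true;
}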
+
+static unsigned int
+nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
+ bool *tunnel_act)
+{
+ unsigned int act_off = 0, act_len;
+ struct nfp_fl_act_head *a;
+ u8 act_id = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&act_src[act_off];
+ act_len = a->len_lw << NFP_FL_LW_SIZ;
+ act_id = a->jump_id;
+
+ switch (act_id) {
+ case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+ if (tunnel_act)
+ *tunnel_act = true;
+ /* fall through */
+ case NFP_FL_ACTION_OPCODE_PRE_LAG:
+ memcpy(act_dst + act_off, act_src + act_off, act_len);
+ break;
+ default:
+ return act_off;
+ }
+
+ act_off += act_len;
+ }
+
+ return act_off;
+}
+
+static int nfp_fl_verify_post_tun_acts(char *acts, int len)
+{
+ struct nfp_fl_act_head *a;
+ unsigned int act_off = 0;
+
+ while (act_off < len) {
+ a = (struct nfp_fl_act_head *)&acts[act_off];
+ if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+ return -EOPNOTSUPP;
+
+ act_off += a->len_lw << NFP_FL_LW_SIZ;
+ }
+
+ return 0;
+}
+
+static int
+nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2,
+ struct nfp_fl_payload *merge_flow)
+{
+ unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+ bool tunnel_act = false;
+ char *merge_act;
+ int err;
+
+ /* The last action of sub_flow1 must be output - do not merge this. */
+ sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
+ sub2_act_len = sub_flow2->meta.act_len;
+
+ if (!sub2_act_len)
+ return -EINVAL;
+
+ if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
+ return -EINVAL;
+
+ /* A shortcut can only be applied if there is a single action. */
+ if (sub1_act_len)
+ merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+ else
+ merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
+
+ merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
+ merge_act = merge_flow->action_data;
+
+ /* Copy any pre-actions to the start of merge flow action list. */
+ pre_off1 = nfp_flower_copy_pre_actions(merge_act,
+ sub_flow1->action_data,
+ sub1_act_len, &tunnel_act);
+ merge_act += pre_off1;
+ sub1_act_len -= pre_off1;
+ pre_off2 = nfp_flower_copy_pre_actions(merge_act,
+ sub_flow2->action_data,
+ sub2_act_len, NULL);
+ merge_act += pre_off2;
+ sub2_act_len -= pre_off2;
+
+ /* FW does a tunnel push when egressing, therefore, if sub_flow 1 pushes
+ * a tunnel, sub_flow 2 can only have output actions for a valid merge.
+ */
+ if (tunnel_act) {
+ char *post_tun_acts = &sub_flow2->action_data[pre_off2];
+
+ err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len);
+ if (err)
+ return err;
+ }
+
+ /* Copy remaining actions from sub_flows 1 and 2. */
+ memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+ merge_act += sub1_act_len;
+ memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
+
+ return 0;
+}
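The merged action list is therefore laid out as: sub_flow1's pre-actions, sub_flow2's pre-actions, sub_flow1's remaining actions with its final output stripped, then all of sub_flow2's remaining actions. A toy sketch of just that buffer arithmetic, assuming the caller has already computed the prefix lengths as above (dst must be at least len1 + len2 bytes):

#include <string.h>

/* Toy layout helper: len1 already excludes sub_flow1's trailing output
 * action; pre1/pre2 are the pre-action prefix lengths (all in bytes).
 */
static size_t merged_act_len(char *dst,
			     const char *act1, size_t pre1, size_t len1,
			     const char *act2, size_t pre2, size_t len2)
{
	size_t off = 0;

	memcpy(dst + off, act1, pre1);			off += pre1; /* pre-actions 1 */
	memcpy(dst + off, act2, pre2);			off += pre2; /* pre-actions 2 */
	memcpy(dst + off, act1 + pre1, len1 - pre1);	off += len1 - pre1;
	memcpy(dst + off, act2 + pre2, len2 - pre2);	off += len2 - pre2;

	return off;					/* == len1 + len2 */
}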
+
+/* Flow link code should only be accessed under RTNL. */
+static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
+{
+ list_del(&link->merge_flow.list);
+ list_del(&link->sub_flow.list);
+ kfree(link);
+}
+
+static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
+ if (link->sub_flow.flow == sub_flow) {
+ nfp_flower_unlink_flow(link);
+ return;
+ }
+}
+
+static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ link = kmalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ link->merge_flow.flow = merge_flow;
+ list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
+ link->sub_flow.flow = sub_flow;
+ list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
+
+ return 0;
+}
+
+/**
+ * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows to single flow.
+ * @app: Pointer to the APP handle
+ * @sub_flow1: Initial flow matched to produce merge hint
+ * @sub_flow2: Post recirculation flow matched in merge hint
+ *
+ * Combines 2 flows (if valid) into a single flow, removing the initial flow from hw
+ * and offloading the new, merged flow.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow1,
+ struct nfp_fl_payload *sub_flow2)
+{
+ struct tc_cls_flower_offload merge_tc_off;
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload *merge_flow;
+ struct nfp_fl_key_ls merge_key_ls;
+ int err;
+
+ ASSERT_RTNL();
+
+ if (sub_flow1 == sub_flow2 ||
+ nfp_flower_is_merge_flow(sub_flow1) ||
+ nfp_flower_is_merge_flow(sub_flow2))
+ return -EINVAL;
+
+ err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+ if (err)
+ return err;
+
+ merge_key_ls.key_size = sub_flow1->meta.key_len;
+
+ merge_flow = nfp_flower_allocate_new(&merge_key_ls);
+ if (!merge_flow)
+ return -ENOMEM;
+
+ merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
+ merge_flow->ingress_dev = sub_flow1->ingress_dev;
+
+ memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
+ sub_flow1->meta.key_len);
+ memcpy(merge_flow->mask_data, sub_flow1->mask_data,
+ sub_flow1->meta.mask_len);
+
+ err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
+ if (err)
+ goto err_destroy_merge_flow;
+
+ err = nfp_flower_link_flows(merge_flow, sub_flow1);
+ if (err)
+ goto err_destroy_merge_flow;
+
+ err = nfp_flower_link_flows(merge_flow, sub_flow2);
+ if (err)
+ goto err_unlink_sub_flow1;
+
+ merge_tc_off.cookie = merge_flow->tc_flower_cookie;
+ err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
+ merge_flow->ingress_dev);
+ if (err)
+ goto err_unlink_sub_flow2;
+
+ err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
+ nfp_flower_table_params);
+ if (err)
+ goto err_release_metadata;
+
+ err = nfp_flower_xmit_flow(app, merge_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ if (err)
+ goto err_remove_rhash;
+
+ merge_flow->in_hw = true;
+ sub_flow1->in_hw = false;
+
+ return 0;
+
+err_remove_rhash:
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &merge_flow->fl_node,
+ nfp_flower_table_params));
+err_release_metadata:
+ nfp_modify_flow_metadata(app, merge_flow);
+err_unlink_sub_flow2:
+ nfp_flower_unlink_flows(merge_flow, sub_flow2);
+err_unlink_sub_flow1:
+ nfp_flower_unlink_flows(merge_flow, sub_flow1);
+err_destroy_merge_flow:
+ kfree(merge_flow->action_data);
+ kfree(merge_flow->mask_data);
+ kfree(merge_flow->unmasked_data);
+ kfree(merge_flow);
+ return err;
+}
+
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -454,6 +919,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (port)
port->tc_offload_cnt++;
+ flow_pay->in_hw = true;
+
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -475,6 +942,75 @@ err_free_key_ls:
return err;
}
+static void
+nfp_flower_remove_merge_flow(struct nfp_app *app,
+ struct nfp_fl_payload *del_sub_flow,
+ struct nfp_fl_payload *merge_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload_link *link, *temp;
+ struct nfp_fl_payload *origin;
+ bool mod = false;
+ int err;
+
+ link = list_first_entry(&merge_flow->linked_flows,
+ struct nfp_fl_payload_link, merge_flow.list);
+ origin = link->sub_flow.flow;
+
+ /* Re-add the rule the merge had overwritten if it has not been deleted. */
+ if (origin != del_sub_flow)
+ mod = true;
+
+ err = nfp_modify_flow_metadata(app, merge_flow);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
+ goto err_free_links;
+ }
+
+ if (!mod) {
+ err = nfp_flower_xmit_flow(app, merge_flow,
+ NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
+ if (err) {
+ nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
+ goto err_free_links;
+ }
+ } else {
+ __nfp_modify_flow_metadata(priv, origin);
+ err = nfp_flower_xmit_flow(app, origin,
+ NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+ if (err)
+ nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
+ origin->in_hw = true;
+ }
+
+err_free_links:
+ /* Clean any links connected with the merged flow. */
+ list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
+ merge_flow.list)
+ nfp_flower_unlink_flow(link);
+
+ kfree(merge_flow->action_data);
+ kfree(merge_flow->mask_data);
+ kfree(merge_flow->unmasked_data);
+ WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+ &merge_flow->fl_node,
+ nfp_flower_table_params));
+ kfree_rcu(merge_flow, rcu);
+}
+
+static void
+nfp_flower_del_linked_merge_flows(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link, *temp;
+
+ /* Remove any merge flow formed from the deleted sub_flow. */
+ list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
+ sub_flow.list)
+ nfp_flower_remove_merge_flow(app, sub_flow,
+ link->merge_flow.flow);
+}
+
/**
* nfp_flower_del_offload() - Removes a flow from hardware.
* @app: Pointer to the APP handle
@@ -482,7 +1018,7 @@ err_free_key_ls:
* @flow: TC flower classifier offload structure
*
* Removes a flow from the repeated hash structure and clears the
- * action payload.
+ * action payload. Any merge flows formed from this flow are also deleted.
*
* Return: negative value on error, 0 if removed successfully.
*/
@@ -504,17 +1040,22 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
- goto err_free_flow;
+ goto err_free_merge_flow;
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
+ if (!nfp_flow->in_hw) {
+ err = 0;
+ goto err_free_merge_flow;
+ }
+
err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
- if (err)
- goto err_free_flow;
+ /* Fall through on error. */
-err_free_flow:
+err_free_merge_flow:
+ nfp_flower_del_linked_merge_flows(app, nfp_flow);
if (port)
port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
@@ -527,6 +1068,52 @@ err_free_flow:
return err;
}
+static void
+__nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *merge_flow)
+{
+ struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_payload_link *link;
+ struct nfp_fl_payload *sub_flow;
+ u64 pkts, bytes, used;
+ u32 ctx_id;
+
+ ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
+ pkts = priv->stats[ctx_id].pkts;
+ /* Do not cycle subflows if no stats to distribute. */
+ if (!pkts)
+ return;
+ bytes = priv->stats[ctx_id].bytes;
+ used = priv->stats[ctx_id].used;
+
+ /* Reset stats for the merge flow. */
+ priv->stats[ctx_id].pkts = 0;
+ priv->stats[ctx_id].bytes = 0;
+
+ /* The merge flow has received stats updates from firmware.
+ * Distribute these stats to all subflows that form the merge.
+ * The stats will then be collected by TC via the subflows.
+ */
+ list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
+ sub_flow = link->sub_flow.flow;
+ ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
+ priv->stats[ctx_id].pkts += pkts;
+ priv->stats[ctx_id].bytes += bytes;
+ priv->stats[ctx_id].used = max_t(u64, priv->stats[ctx_id].used, used);
+ }
+}
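Counters reported by firmware against the merge flow's own context are drained and credited to every subflow, and the merge context is zeroed so the same packets are not counted twice; each subflow's last-used timestamp is advanced to the newest value seen. A toy sketch of that redistribution, with illustrative types rather than the driver's:

#include <stdint.h>

struct toy_stats {
	uint64_t pkts;
	uint64_t bytes;
	uint64_t used;		/* last-used timestamp */
};

static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

/* Drain the merge flow's counters and credit them to every subflow. */
static void distribute_merge_stats(struct toy_stats *merge,
				   struct toy_stats *subs, int n_subs)
{
	uint64_t pkts = merge->pkts, bytes = merge->bytes, used = merge->used;
	int i;

	if (!pkts)
		return;

	merge->pkts = 0;
	merge->bytes = 0;

	for (i = 0; i < n_subs; i++) {
		subs[i].pkts += pkts;
		subs[i].bytes += bytes;
		subs[i].used = max_u64(subs[i].used, used);
	}
}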
+
+static void
+nfp_flower_update_merge_stats(struct nfp_app *app,
+ struct nfp_fl_payload *sub_flow)
+{
+ struct nfp_fl_payload_link *link;
+
+ /* Get merge flows that the subflow forms part of and distribute their stats. */
+ list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
+ __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
+}
+
/**
* nfp_flower_get_stats() - Populates flow stats obtained from hardware.
* @app: Pointer to the APP handle
@@ -553,6 +1140,10 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
+ /* If request is for a sub_flow, update stats from merged flows. */
+ if (!list_empty(&nfp_flow->linked_flows))
+ nfp_flower_update_merge_stats(app, nfp_flow);
+
flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
priv->stats[ctx_id].pkts, priv->stats[ctx_id].used);
@@ -594,6 +1185,9 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
+ type_data);
default:
return -EOPNOTSUPP;
}
@@ -603,10 +1197,14 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
struct tc_block_offload *f)
{
struct nfp_repr *repr = netdev_priv(netdev);
+ struct nfp_flower_repr_priv *repr_priv;
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ repr_priv = repr->app_priv;
+ repr_priv->block_shared = tcf_block_shared(f->block);
+
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
@@ -682,7 +1280,9 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
int err;
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ !(f->binder_type == TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
+ nfp_flower_internal_port_can_offload(app, netdev)))
return -EOPNOTSUPP;
switch (f->command) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
new file mode 100644
index 000000000000..86e968cd5ffd
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2019 Netronome Systems, Inc. */
+
+#include <linux/math64.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+
+#include "cmsg.h"
+#include "main.h"
+#include "../nfp_port.h"
+
+#define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
+
+struct nfp_police_cfg_head {
+ __be32 flags_opts;
+ __be32 port;
+};
+
+/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
+ * See RFC 2698 for more details.
+ * ----------------------------------------------------------------
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Flag options |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Port Ingress |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Token Bucket Peak |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Token Bucket Committed |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Peak Burst Size |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Committed Burst Size |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Peak Information Rate |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Committed Information Rate |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_police_config {
+ struct nfp_police_cfg_head head;
+ __be32 bkt_tkn_p;
+ __be32 bkt_tkn_c;
+ __be32 pbs;
+ __be32 cbs;
+ __be32 pir;
+ __be32 cir;
+};
+
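As programmed by nfp_flower_install_rate_limiter() further down, both colour buckets of the trTCM receive the same rate and burst, so the conditioner effectively acts as a single-rate policer. A hedged userspace sketch of filling such a big-endian config message; the struct and helper here are illustrative stand-ins, not the driver's types:

#include <arpa/inet.h>	/* htonl() */
#include <stdint.h>
#include <string.h>

struct toy_police_config {
	uint32_t flags_opts;
	uint32_t port;
	uint32_t bkt_tkn_p, bkt_tkn_c;
	uint32_t pbs, cbs;
	uint32_t pir, cir;
};

/* One rate/burst pair programmed into both the peak and committed buckets.
 * rate_Bps: bytes per second, as taken from police.rate_bytes_ps.
 */
static void toy_police_fill(struct toy_police_config *cfg, uint32_t port_id,
			    uint32_t rate_Bps, uint32_t burst_bytes)
{
	memset(cfg, 0, sizeof(*cfg));
	cfg->port      = htonl(port_id);
	cfg->bkt_tkn_p = htonl(burst_bytes);
	cfg->bkt_tkn_c = htonl(burst_bytes);
	cfg->pbs       = htonl(burst_bytes);
	cfg->cbs       = htonl(burst_bytes);
	cfg->pir       = htonl(rate_Bps);
	cfg->cir       = htonl(rate_Bps);
}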
+struct nfp_police_stats_reply {
+ struct nfp_police_cfg_head head;
+ __be64 pass_bytes;
+ __be64 pass_pkts;
+ __be64 drop_bytes;
+ __be64 drop_pkts;
+};
+
+static int
+nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *action = &flow->rule->action.entries[0];
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_config *config;
+ struct nfp_repr *repr;
+ struct sk_buff *skb;
+ u32 netdev_port_id;
+ u64 burst, rate;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+
+ if (repr_priv->block_shared) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
+ return -EOPNOTSUPP;
+ }
+
+ if (repr->port->type != NFP_PORT_VF_PORT) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_offload_has_one_action(&flow->rule->action)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (flow->common.prio != (1 << 16)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
+ return -EOPNOTSUPP;
+ }
+
+ if (action->id != FLOW_ACTION_POLICE) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
+ return -EOPNOTSUPP;
+ }
+
+ rate = action->police.rate_bytes_ps;
+ burst = div_u64(rate * PSCHED_NS2TICKS(action->police.burst),
+ PSCHED_TICKS_PER_SEC);
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.port = cpu_to_be32(netdev_port_id);
+ config->bkt_tkn_p = cpu_to_be32(burst);
+ config->bkt_tkn_c = cpu_to_be32(burst);
+ config->pbs = cpu_to_be32(burst);
+ config->cbs = cpu_to_be32(burst);
+ config->pir = cpu_to_be32(rate);
+ config->cir = cpu_to_be32(rate);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ repr_priv->qos_table.netdev_port_id = netdev_port_id;
+ fl_priv->qos_rate_limiters++;
+ if (fl_priv->qos_rate_limiters == 1)
+ schedule_delayed_work(&fl_priv->qos_stats_work,
+ NFP_FL_QOS_UPDATE);
+
+ return 0;
+}
+
+static int
+nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_config *config;
+ struct nfp_repr *repr;
+ struct sk_buff *skb;
+ u32 netdev_port_id;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+
+ netdev_port_id = nfp_repr_get_port_id(netdev);
+ repr_priv = repr->app_priv;
+
+ if (!repr_priv->qos_table.netdev_port_id) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
+ return -EOPNOTSUPP;
+ }
+
+ skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
+ NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Clear all qos associated data for this interface */
+ memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
+ fl_priv->qos_rate_limiters--;
+ if (!fl_priv->qos_rate_limiters)
+ cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+
+ config = nfp_flower_cmsg_get_data(skb);
+ memset(config, 0, sizeof(struct nfp_police_config));
+ config->head.port = cpu_to_be32(netdev_port_id);
+ nfp_ctrl_tx(repr->app->ctrl, skb);
+
+ return 0;
+}
+
+void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_police_stats_reply *msg;
+ struct nfp_stat_pair *curr_stats;
+ struct nfp_stat_pair *prev_stats;
+ struct net_device *netdev;
+ struct nfp_repr *repr;
+ u32 netdev_port_id;
+
+ msg = nfp_flower_cmsg_get_data(skb);
+ netdev_port_id = be32_to_cpu(msg->head.port);
+ rcu_read_lock();
+ netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
+ if (!netdev)
+ goto exit_unlock_rcu;
+
+ repr = netdev_priv(netdev);
+ repr_priv = repr->app_priv;
+ curr_stats = &repr_priv->qos_table.curr_stats;
+ prev_stats = &repr_priv->qos_table.prev_stats;
+
+ spin_lock_bh(&fl_priv->qos_stats_lock);
+ curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
+ be64_to_cpu(msg->drop_pkts);
+ curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
+ be64_to_cpu(msg->drop_bytes);
+
+ if (!repr_priv->qos_table.last_update) {
+ prev_stats->pkts = curr_stats->pkts;
+ prev_stats->bytes = curr_stats->bytes;
+ }
+
+ repr_priv->qos_table.last_update = jiffies;
+ spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+static void
+nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
+ u32 netdev_port_id)
+{
+ struct nfp_police_cfg_head *head;
+ struct sk_buff *skb;
+
+ skb = nfp_flower_cmsg_alloc(fl_priv->app,
+ sizeof(struct nfp_police_cfg_head),
+ NFP_FLOWER_CMSG_TYPE_QOS_STATS,
+ GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ head = nfp_flower_cmsg_get_data(skb);
+ memset(head, 0, sizeof(struct nfp_police_cfg_head));
+ head->port = cpu_to_be32(netdev_port_id);
+
+ nfp_ctrl_tx(fl_priv->app->ctrl, skb);
+}
+
+static void
+nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
+{
+ struct nfp_reprs *repr_set;
+ int i;
+
+ rcu_read_lock();
+ repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
+ if (!repr_set)
+ goto exit_unlock_rcu;
+
+ for (i = 0; i < repr_set->num_reprs; i++) {
+ struct net_device *netdev;
+
+ netdev = rcu_dereference(repr_set->reprs[i]);
+ if (netdev) {
+ struct nfp_repr *priv = netdev_priv(netdev);
+ struct nfp_flower_repr_priv *repr_priv;
+ u32 netdev_port_id;
+
+ repr_priv = priv->app_priv;
+ netdev_port_id = repr_priv->qos_table.netdev_port_id;
+ if (!netdev_port_id)
+ continue;
+
+ nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
+ }
+ }
+
+exit_unlock_rcu:
+ rcu_read_unlock();
+}
+
+static void update_stats_cache(struct work_struct *work)
+{
+ struct delayed_work *delayed_work;
+ struct nfp_flower_priv *fl_priv;
+
+ delayed_work = to_delayed_work(work);
+ fl_priv = container_of(delayed_work, struct nfp_flower_priv,
+ qos_stats_work);
+
+ nfp_flower_stats_rlim_request_all(fl_priv);
+ schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
+}
+
+static int
+nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+ struct nfp_flower_repr_priv *repr_priv;
+ struct nfp_stat_pair *curr_stats;
+ struct nfp_stat_pair *prev_stats;
+ u64 diff_bytes, diff_pkts;
+ struct nfp_repr *repr;
+
+ if (!nfp_netdev_is_nfp_repr(netdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
+ return -EOPNOTSUPP;
+ }
+ repr = netdev_priv(netdev);
+
+ repr_priv = repr->app_priv;
+ if (!repr_priv->qos_table.netdev_port_id) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock_bh(&fl_priv->qos_stats_lock);
+ curr_stats = &repr_priv->qos_table.curr_stats;
+ prev_stats = &repr_priv->qos_table.prev_stats;
+ diff_pkts = curr_stats->pkts - prev_stats->pkts;
+ diff_bytes = curr_stats->bytes - prev_stats->bytes;
+ prev_stats->pkts = curr_stats->pkts;
+ prev_stats->bytes = curr_stats->bytes;
+ spin_unlock_bh(&fl_priv->qos_stats_lock);
+
+ flow_stats_update(&flow->stats, diff_bytes, diff_pkts,
+ repr_priv->qos_table.last_update);
+ return 0;
+}
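Policer stats are reported to TC as deltas: the handler keeps a previous snapshot and pushes up only the difference accumulated since the last query, then re-snapshots. A minimal sketch of that snapshot-and-diff pattern (illustrative names):

#include <stdint.h>

struct toy_counters {
	uint64_t pkts;
	uint64_t bytes;
};

/* Report only what changed since the previous query, then re-snapshot. */
static struct toy_counters
toy_stats_delta(const struct toy_counters *curr, struct toy_counters *prev)
{
	struct toy_counters diff = {
		.pkts  = curr->pkts  - prev->pkts,
		.bytes = curr->bytes - prev->bytes,
	};

	*prev = *curr;
	return diff;
}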
+
+void nfp_flower_qos_init(struct nfp_app *app)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ spin_lock_init(&fl_priv->qos_stats_lock);
+ INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
+}
+
+void nfp_flower_qos_cleanup(struct nfp_app *app)
+{
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ cancel_delayed_work_sync(&fl_priv->qos_stats_work);
+}
+
+int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
+ struct tc_cls_matchall_offload *flow)
+{
+ struct netlink_ext_ack *extack = flow->common.extack;
+ struct nfp_flower_priv *fl_priv = app->priv;
+
+ if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return nfp_flower_install_rate_limiter(app, netdev, flow,
+ extack);
+ case TC_CLSMATCHALL_DESTROY:
+ return nfp_flower_remove_rate_limiter(app, netdev, flow,
+ extack);
+ case TC_CLSMATCHALL_STATS:
+ return nfp_flower_stats_rate_limiter(app, netdev, flow,
+ extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 4d78be4ec4e9..faa06edf95ac 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -171,7 +171,7 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
for (i = 0; i < count; i++) {
ipv4_addr = payload->tun_info[i].ipv4;
port = be32_to_cpu(payload->tun_info[i].egress_port);
- netdev = nfp_app_repr_get(app, port);
+ netdev = nfp_app_dev_get(app, port, NULL);
if (!netdev)
continue;
@@ -270,9 +270,10 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
struct flowi4 *flow, struct neighbour *neigh, gfp_t flag)
{
struct nfp_tun_neigh payload;
+ u32 port_id;
- /* Only offload representor IPv4s for now. */
- if (!nfp_netdev_is_nfp_repr(netdev))
+ port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
+ if (!port_id)
return;
memset(&payload, 0, sizeof(struct nfp_tun_neigh));
@@ -290,7 +291,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.src_ipv4 = flow->saddr;
ether_addr_copy(payload.src_addr, netdev->dev_addr);
neigh_ha_snapshot(payload.dst_addr, neigh, netdev);
- payload.port_id = cpu_to_be32(nfp_repr_get_port_id(netdev));
+ payload.port_id = cpu_to_be32(port_id);
/* Add destination of new route to NFP cache. */
nfp_tun_add_route_to_cache(app, payload.dst_ipv4);
@@ -366,7 +367,7 @@ void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb)
payload = nfp_flower_cmsg_get_data(skb);
- netdev = nfp_app_repr_get(app, be32_to_cpu(payload->ingress_port));
+ netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL);
if (!netdev)
goto route_fail_warning;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index f8d422713705..76d13af46a7a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -79,7 +79,7 @@ extern const struct nfp_app_type app_abm;
* @eswitch_mode_set: set SR-IOV eswitch mode (under pf->lock)
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
- * @repr_get: get representor netdev
+ * @dev_get: get representor or internal port representing netdev
*/
struct nfp_app_type {
enum nfp_app_id id;
@@ -143,7 +143,8 @@ struct nfp_app_type {
enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
int (*eswitch_mode_set)(struct nfp_app *app, u16 mode);
- struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
+ struct net_device *(*dev_get)(struct nfp_app *app, u32 id,
+ bool *redir_egress);
};
/**
@@ -397,12 +398,14 @@ static inline void nfp_app_sriov_disable(struct nfp_app *app)
app->type->sriov_disable(app);
}
-static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
+static inline
+struct net_device *nfp_app_dev_get(struct nfp_app *app, u32 id,
+ bool *redir_egress)
{
- if (unlikely(!app || !app->type->repr_get))
+ if (unlikely(!app || !app->type->dev_get))
return NULL;
- return app->type->repr_get(app, id);
+ return app->type->dev_get(app, id, redir_egress);
}
struct nfp_app *nfp_app_from_netdev(struct net_device *netdev);
@@ -433,6 +436,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev);
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index e9eca99cf493..c50fce42f473 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -144,7 +144,8 @@ nfp_devlink_sb_pool_get(struct devlink *devlink, unsigned int sb_index,
static int
nfp_devlink_sb_pool_set(struct devlink *devlink, unsigned int sb_index,
u16 pool_index,
- u32 size, enum devlink_sb_threshold_type threshold_type)
+ u32 size, enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
{
struct nfp_pf *pf = devlink_priv(devlink);
@@ -354,6 +355,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
{
struct nfp_eth_table_port eth_port;
struct devlink *devlink;
+ const u8 *serial;
+ int serial_len;
int ret;
rtnl_lock();
@@ -362,10 +365,10 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
if (ret)
return ret;
- devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ serial_len = nfp_cpp_serial(port->app->cpp, &serial);
devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
eth_port.label_port, eth_port.is_split,
- eth_port.label_subport);
+ eth_port.label_subport, serial, serial_len);
devlink = priv_to_devlink(app->pf);
@@ -377,13 +380,23 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
devlink_port_unregister(&port->dl_port);
}
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev)
+void nfp_devlink_port_type_eth_set(struct nfp_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+}
+
+void nfp_devlink_port_type_clear(struct nfp_port *port)
{
- struct nfp_app *app;
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev)
+{
+ struct nfp_port *port;
- app = nfp_app_from_netdev(netdev);
- if (!app)
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
return NULL;
- return priv_to_devlink(app->pf);
+ return &port->dl_port;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index f4c8776e42b6..948d1a4b4643 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -294,6 +294,9 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
+ if (!pci_get_drvdata(pdev))
+ return -ENOENT;
+
if (num_vfs == 0)
return nfp_pcie_sriov_disable(pdev);
else
@@ -720,9 +723,13 @@ err_pci_disable:
return err;
}
-static void nfp_pci_remove(struct pci_dev *pdev)
+static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
{
- struct nfp_pf *pf = pci_get_drvdata(pdev);
+ struct nfp_pf *pf;
+
+ pf = pci_get_drvdata(pdev);
+ if (!pf)
+ return;
nfp_hwmon_unregister(pf);
@@ -733,7 +740,7 @@ static void nfp_pci_remove(struct pci_dev *pdev)
vfree(pf->dumpspec);
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
- if (pf->fw_loaded)
+ if (unload_fw && pf->fw_loaded)
nfp_fw_unload(pf);
destroy_workqueue(pf->wq);
@@ -749,11 +756,22 @@ static void nfp_pci_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+static void nfp_pci_remove(struct pci_dev *pdev)
+{
+ __nfp_pci_shutdown(pdev, true);
+}
+
+static void nfp_pci_shutdown(struct pci_dev *pdev)
+{
+ __nfp_pci_shutdown(pdev, false);
+}
+
static struct pci_driver nfp_pci_driver = {
.name = nfp_driver_name,
.id_table = nfp_pci_device_ids,
.probe = nfp_pci_probe,
.remove = nfp_pci_remove,
+ .shutdown = nfp_pci_shutdown,
.sriov_configure = nfp_pcie_sriov_configure,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index be37c2d6151c..df9aff2684ed 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -539,12 +539,17 @@ struct nfp_net_dp {
* @shared_handler: Handler for shared interrupts
* @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz)
- * @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_lock: Protects @reconfig_posted, @reconfig_timer_active,
+ * @reconfig_sync_present and HW reconfiguration request
+ * regs/machinery from async requests (sync must take
+ * @bar_lock)
* @reconfig_posted: Pending reconfig bits coming from async sources
* @reconfig_timer_active: Timer for reading reconfiguration results is pending
* @reconfig_sync_present: Some thread is performing synchronous reconfig
* @reconfig_timer: Timer for async reading of reconfig results
* @reconfig_in_progress_update: Update FW is processing now (debug only)
+ * @bar_lock: vNIC config BAR access lock, protects: update,
+ * mailbox area
* @link_up: Is the link up?
* @link_status_lock: Protects @link_* and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -615,6 +620,8 @@ struct nfp_net {
struct timer_list reconfig_timer;
u32 reconfig_in_progress_update;
+ struct mutex bar_lock;
+
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
u32 tx_coalesce_usecs;
@@ -839,6 +846,16 @@ static inline void nfp_ctrl_unlock(struct nfp_net *nn)
spin_unlock_bh(&nn->r_vecs[0].lock);
}
+static inline void nn_ctrl_bar_lock(struct nfp_net *nn)
+{
+ mutex_lock(&nn->bar_lock);
+}
+
+static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
+{
+ mutex_unlock(&nn->bar_lock);
+}
+
/* Globals */
extern const char nfp_driver_version[];
@@ -871,7 +888,9 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn);
void nfp_net_rss_write_itbl(struct nfp_net *nn);
void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size);
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd);
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6d1b8816552e..b82b684f52ce 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/page_ref.h>
@@ -137,20 +138,37 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
return false;
}
-static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
{
bool timed_out = false;
+ int i;
+
+ /* Poll update field, waiting for NFP to ack the config.
+ * Do an opportunistic wait-busy loop, afterward sleep.
+ */
+ for (i = 0; i < 50; i++) {
+ if (nfp_net_reconfig_check_done(nn, false))
+ return false;
+ udelay(4);
+ }
- /* Poll update field, waiting for NFP to ack the config */
while (!nfp_net_reconfig_check_done(nn, timed_out)) {
- msleep(1);
+ usleep_range(250, 500);
timed_out = time_is_before_eq_jiffies(deadline);
}
+ return timed_out;
+}
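The rewritten wait first spins through 50 short (4 us) polls in the hope that the firmware acks almost immediately, and only then falls back to sleeping 250-500 us between polls until the deadline; a true return means the deadline passed. A userspace sketch of the same two-phase polling idea, with stand-in names and a stubbed completion check:

#include <stdbool.h>
#include <time.h>

static int toy_attempts;

/* Stand-in for reading the UPDATE register back from the device. */
static bool toy_check_done(void)
{
	return ++toy_attempts > 200;
}

/* Spin briefly first, then sleep between polls; true means timed out. */
static bool toy_poll_two_phase(int max_sleeps)
{
	struct timespec slice = { .tv_sec = 0, .tv_nsec = 250 * 1000 };
	int i;

	for (i = 0; i < 50; i++)		/* opportunistic busy phase */
		if (toy_check_done())		/* the driver also udelay()s here */
			return false;

	for (i = 0; i < max_sleeps; i++) {	/* sleeping phase */
		if (toy_check_done())
			return false;
		nanosleep(&slice, NULL);
	}

	return true;
}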
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+ if (__nfp_net_reconfig_wait(nn, deadline))
+ return -EIO;
+
if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
return -EIO;
- return timed_out ? -EIO : 0;
+ return 0;
}
static void nfp_net_reconfig_timer(struct timer_list *t)
@@ -243,7 +261,7 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
}
/**
- * nfp_net_reconfig() - Reconfigure the firmware
+ * __nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
* @update: The value for the update field in the BAR config
*
@@ -253,10 +271,12 @@ static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
*
* Return: Negative errno on error, 0 on success
*/
-int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+static int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
int ret;
+ lockdep_assert_held(&nn->bar_lock);
+
nfp_net_reconfig_sync_enter(nn);
nfp_net_reconfig_start(nn, update);
@@ -274,8 +294,31 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
return ret;
}
+int nfp_net_reconfig(struct nfp_net *nn, u32 update)
+{
+ int ret;
+
+ nn_ctrl_bar_lock(nn);
+ ret = __nfp_net_reconfig(nn, update);
+ nn_ctrl_bar_unlock(nn);
+
+ return ret;
+}
+
+int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
+{
+ if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
+ nn_err(nn, "mailbox too small for %u of data (%u)\n",
+ data_size, nn->tlv_caps.mbox_len);
+ return -EIO;
+ }
+
+ nn_ctrl_bar_lock(nn);
+ return 0;
+}
+
/**
- * nfp_net_reconfig_mbox() - Reconfigure the firmware via the mailbox
+ * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
* @nn: NFP Net device to reconfigure
* @mbox_cmd: The value for the mailbox command
*
@@ -283,19 +326,15 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
*
* Return: Negative errno on error, 0 on success
*/
-int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
+int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
{
u32 mbox = nn->tlv_caps.mbox_off;
int ret;
- if (!nfp_net_has_mbox(&nn->tlv_caps)) {
- nn_err(nn, "no mailbox present, command: %u\n", mbox_cmd);
- return -EIO;
- }
-
+ lockdep_assert_held(&nn->bar_lock);
nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
- ret = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
+ ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
if (ret) {
nn_err(nn, "Mailbox update error\n");
return ret;
@@ -304,6 +343,15 @@ int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd)
return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
}
+int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
+{
+ int ret;
+
+ ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
+ nn_ctrl_bar_unlock(nn);
+ return ret;
+}
+
/* Interrupt configuration and handling
*/
@@ -909,7 +957,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
@@ -1635,6 +1683,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
+ bool redir_egress = false;
struct net_device *netdev;
dma_addr_t new_dma_addr;
u32 meta_len_xdp = 0;
@@ -1770,13 +1819,16 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net *nn;
nn = netdev_priv(dp->netdev);
- netdev = nfp_app_repr_get(nn->app, meta.portid);
+ netdev = nfp_app_dev_get(nn->app, meta.portid,
+ &redir_egress);
if (unlikely(!netdev)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf,
NULL);
continue;
}
- nfp_repr_inc_rx_stats(netdev, pkt_len);
+
+ if (nfp_netdev_is_nfp_repr(netdev))
+ nfp_repr_inc_rx_stats(netdev, pkt_len);
}
skb = build_skb(rxbuf->frag, true_bufsz);
@@ -1811,7 +1863,13 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
- napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ if (likely(!redir_egress)) {
+ napi_gro_receive(&rx_ring->r_vec->napi, skb);
+ } else {
+ skb->dev = netdev;
+ __skb_push(skb, ETH_HLEN);
+ dev_queue_xmit(skb);
+ }
}
if (xdp_prog) {
@@ -3111,7 +3169,9 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
static int
nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
/* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets
@@ -3119,17 +3179,23 @@ nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+ if (err)
+ return err;
+
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q);
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD);
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
static int
nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
struct nfp_net *nn = netdev_priv(netdev);
+ int err;
/* Priority tagged packets with vlan id 0 are processed by the
* NFP as untagged packets
@@ -3137,11 +3203,15 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (!vid)
return 0;
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
+ if (err)
+ return err;
+
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
ETH_P_8021Q);
- return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
static void nfp_net_stat64(struct net_device *netdev,
@@ -3324,8 +3394,11 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
struct nfp_net *nn = netdev_priv(netdev);
int n;
+ /* If port is defined, devlink_port is registered and devlink core
+ * is taking care of name formatting.
+ */
if (nn->port)
- return nfp_port_get_phys_port_name(netdev, name, len);
+ return -EOPNOTSUPP;
if (nn->dp.is_vf || nn->vnic_no_name)
return -EOPNOTSUPP;
@@ -3517,6 +3590,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_setup_tc = nfp_port_setup_tc,
@@ -3530,8 +3604,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
- .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
/**
@@ -3548,7 +3621,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3564,7 +3637,6 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
- nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
@@ -3632,6 +3704,8 @@ nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;
+ mutex_init(&nn->bar_lock);
+
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
@@ -3659,6 +3733,9 @@ err_free_nn:
void nfp_net_free(struct nfp_net *nn)
{
WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
+
+ mutex_destroy(&nn->bar_lock);
+
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
else
@@ -3920,9 +3997,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- if (nn->dp.netdev)
- nfp_net_netdev_init(nn);
-
/* Stash the re-configuration queue away. First odd queue in TX Bar */
nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
@@ -3935,6 +4009,9 @@ int nfp_net_init(struct nfp_net *nn)
if (err)
return err;
+ if (nn->dp.netdev)
+ nfp_net_netdev_init(nn);
+
nfp_net_vecs_init(nn);
if (!nn->dp.netdev)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 372adea10e14..25919e338071 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -104,8 +104,6 @@
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
-#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
-#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -130,7 +128,6 @@
#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
-#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
@@ -392,7 +389,6 @@
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
-#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
#define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
@@ -498,10 +494,4 @@ struct nfp_net_tlv_caps {
int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem,
struct nfp_net_tlv_caps *caps);
-
-static inline bool nfp_net_has_mbox(struct nfp_net_tlv_caps *caps)
-{
- return caps->mbox_len >= NFP_NET_CFG_MBOX_SIMPLE_LEN;
-}
-
#endif /* _NFP_NET_CTRL_H_ */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 690b62718dbb..851e31e0ba8e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
+#include <linux/sfp.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
@@ -152,6 +153,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_PER_Q_STATS 3
+#define SFP_SFF_REV_COMPLIANCE 1
+
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
struct nfp_nsp *nsp;
@@ -1096,6 +1099,130 @@ nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
buffer);
}
+static int
+nfp_port_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ unsigned int read_len;
+ struct nfp_nsp *nsp;
+ int err = 0;
+ u8 data;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ switch (eth_port->interface) {
+ case NFP_INTERFACE_SFP:
+ case NFP_INTERFACE_SFP28:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF8472_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (!data) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF_REV_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (data < 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Unsupported module 0x%x detected\n",
+ eth_port->interface);
+ err = -EINVAL;
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
+static int
+nfp_port_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ eeprom->offset, data, eeprom->len,
+ &eeprom->len);
+ if (err < 0) {
+ if (eeprom->len) {
+ netdev_warn(netdev,
+ "Incomplete read from module EEPROM: %d\n",
+ err);
+ err = 0;
+ } else {
+ netdev_err(netdev,
+ "Reading from module EEPROM failed: %d\n",
+ err);
+ }
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
+
static int nfp_net_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -1253,6 +1380,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
@@ -1272,6 +1401,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 08f5fdbd8e41..986464d4a206 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -150,34 +150,39 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nn->id = id;
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ return err;
+ }
+
err = nfp_net_init(nn);
if (err)
- return err;
+ goto err_devlink_port_clean;
nfp_net_debugfs_vnic_add(nn, pf->ddir);
- if (nn->port) {
- err = nfp_devlink_port_register(pf->app, nn->port);
- if (err)
- goto err_dfs_clean;
- }
+ if (nn->port)
+ nfp_devlink_port_type_eth_set(nn->port);
nfp_net_info(nn);
if (nfp_net_is_data_vnic(nn)) {
err = nfp_app_vnic_init(pf->app, nn);
if (err)
- goto err_devlink_port_clean;
+ goto err_devlink_port_type_clean;
}
return 0;
-err_devlink_port_clean:
+err_devlink_port_type_clean:
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
-err_dfs_clean:
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+err_devlink_port_clean:
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
return err;
}
@@ -221,9 +226,11 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
if (nfp_net_is_data_vnic(nn))
nfp_app_vnic_clean(pf->app, nn);
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
}
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 94d228c04496..036edcc1fa18 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -267,13 +267,13 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
+ .ndo_set_vf_trust = nfp_app_set_vf_trust,
.ndo_get_vf_config = nfp_app_get_vf_config,
.ndo_set_vf_link_state = nfp_app_set_vf_link_state,
.ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
void
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index b6ec46ed0540..3fdaaf8ed2ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2017 Netronome Systems, Inc. */
+/* Copyright (C) 2017-2019 Netronome Systems, Inc. */
#include <linux/bitfield.h>
#include <linux/errno.h>
@@ -146,6 +146,30 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
"spoofchk");
}
+int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
+ unsigned int vf_offset;
+ u8 vf_ctrl;
+ int err;
+
+ err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
+ "trust");
+ if (err)
+ return err;
+
+ /* Write trust control bit to VF entry in VF config symbol */
+ vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ +
+ NFP_NET_VF_CFG_CTRL;
+ vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset);
+ vf_ctrl &= ~NFP_NET_VF_CFG_CTRL_TRUST;
+ vf_ctrl |= FIELD_PREP(NFP_NET_VF_CFG_CTRL_TRUST, enable);
+ writeb(vf_ctrl, app->pf->vfcfg_tbl2 + vf_offset);
+
+ return nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_TRUST,
+ "trust");
+}
+
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state)
{
@@ -213,6 +237,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tci);
ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
+ ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
return 0;
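For reference, a minimal standalone sketch (editor's illustration, not driver code) of what the FIELD_PREP()/FIELD_GET() calls above boil down to for the single-bit NFP_NET_VF_CFG_CTRL_TRUST mask (0x8, bit 3, defined in the header diff below); the starting flags value is made up:

#include <assert.h>
#include <stdint.h>

#define CFG_CTRL_TRUST 0x8	/* single-bit mask, bit 3 */

int main(void)
{
	uint8_t vf_ctrl = 0x5;			/* hypothetical current flags */

	vf_ctrl &= ~CFG_CTRL_TRUST;		/* clear the trust bit ... */
	vf_ctrl |= 1u << 3;			/* ... then set it: FIELD_PREP(0x8, 1) */
	assert(vf_ctrl == 0xd);

	/* Read-back as done in nfp_app_get_vf_config(): FIELD_GET(0x8, flags) */
	assert(((vf_ctrl & CFG_CTRL_TRUST) >> 3) == 1);
	return 0;
}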
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
index c9f09c5bb5ee..a3db0cbf6425 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-/* Copyright (C) 2017 Netronome Systems, Inc. */
+/* Copyright (C) 2017-2019 Netronome Systems, Inc. */
#ifndef _NFP_NET_SRIOV_H_
#define _NFP_NET_SRIOV_H_
@@ -19,12 +19,14 @@
#define NFP_NET_VF_CFG_MB_CAP_VLAN (0x1 << 1)
#define NFP_NET_VF_CFG_MB_CAP_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_CAP_LINK_STATE (0x1 << 3)
+#define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_RET 0x2
#define NFP_NET_VF_CFG_MB_UPD 0x4
#define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0)
#define NFP_NET_VF_CFG_MB_UPD_VLAN (0x1 << 1)
#define NFP_NET_VF_CFG_MB_UPD_SPOOF (0x1 << 2)
#define NFP_NET_VF_CFG_MB_UPD_LINK_STATE (0x1 << 3)
+#define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4)
#define NFP_NET_VF_CFG_MB_VF_NUM 0x7
/* VF config entry
@@ -35,6 +37,7 @@
#define NFP_NET_VF_CFG_MAC_HI 0x0
#define NFP_NET_VF_CFG_MAC_LO 0x6
#define NFP_NET_VF_CFG_CTRL 0x4
+#define NFP_NET_VF_CFG_CTRL_TRUST 0x8
#define NFP_NET_VF_CFG_CTRL_SPOOF 0x4
#define NFP_NET_VF_CFG_CTRL_LINK_STATE 0x3
#define NFP_NET_VF_CFG_LS_MODE_AUTO 0
@@ -48,6 +51,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
__be16 vlan_proto);
int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
+int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool setting);
int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
int link_state);
int nfp_app_get_vf_config(struct net_device *netdev, int vf,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 1145849ca7ba..e4977cdf7678 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -282,8 +282,14 @@ err_free_vf:
static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
- struct nfp_net_vf *vf = pci_get_drvdata(pdev);
- struct nfp_net *nn = vf->nn;
+ struct nfp_net_vf *vf;
+ struct nfp_net *nn;
+
+ vf = pci_get_drvdata(pdev);
+ if (!vf)
+ return;
+
+ nn = vf->nn;
/* Note, the order is slightly different from above as we need
* to keep the nn pointer around till we have freed everything.
@@ -317,4 +323,5 @@ struct pci_driver nfp_netvf_pci_driver = {
.id_table = nfp_netvf_pci_device_ids,
.probe = nfp_netvf_pci_probe,
.remove = nfp_netvf_pci_remove,
+ .shutdown = nfp_netvf_pci_remove,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 93c5bfc0510b..fcd16877e6e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -30,22 +30,6 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
return NULL;
}
-int nfp_port_get_port_parent_id(struct net_device *netdev,
- struct netdev_phys_item_id *ppid)
-{
- struct nfp_port *port;
- const u8 *serial;
-
- port = nfp_port_from_netdev(netdev);
- if (!port)
- return -EOPNOTSUPP;
-
- ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial);
- memcpy(&ppid->id, serial, ppid->id_len);
-
- return 0;
-}
-
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 90ae053f5c07..d7fd203bb180 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -131,6 +131,8 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
+void nfp_devlink_port_type_eth_set(struct nfp_port *port);
+void nfp_devlink_port_type_clear(struct nfp_port *port);
/**
* Mac stats (0x0000 - 0x0200)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 3a4e224a64b7..42cf4fd875ea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -79,6 +79,8 @@
#define NFP_VERSIONS_NCSI_OFF 22
#define NFP_VERSIONS_CFGR_OFF 26
+#define NSP_SFF_EEPROM_BLOCK_LEN 8
+
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
@@ -95,6 +97,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */
SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */
SPCODE_VERSIONS = 21, /* Report FW versions */
+ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
};
struct nfp_nsp_dma_buf {
@@ -965,3 +968,62 @@ const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
return (const char *)&buf[buf_off];
}
+
+static int
+__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg module_eeprom = {
+ {
+ .code = SPCODE_READ_SFF_EEPROM,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &module_eeprom);
+}
+
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len)
+{
+ struct eeprom_buf {
+ u8 metalen;
+ __le16 length;
+ __le16 offset;
+ __le16 readlen;
+ u8 eth_index;
+ u8 data[0];
+ } __packed *buf;
+ int bufsz, ret;
+
+ BUILD_BUG_ON(offsetof(struct eeprom_buf, data) % 8);
+
+ /* Buffer must be large enough and rounded to the next block size. */
+ bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN));
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->metalen =
+ offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN;
+ buf->length = cpu_to_le16(len);
+ buf->offset = cpu_to_le16(offset);
+ buf->eth_index = eth_index;
+
+ ret = __nfp_nsp_module_eeprom(state, buf, bufsz);
+
+ *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen));
+ if (*read_len)
+ memcpy(data, buf->data, *read_len);
+
+ if (!ret && *read_len < len)
+ ret = -EIO;
+
+ kfree(buf);
+
+ return ret;
+}
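The buffer sizing above can be checked with plain arithmetic. The following is an editor's illustration only, redone in userspace C for a hypothetical 95-byte read; ROUND_UP stands in for the kernel's round_up() and the offsetof()-based header size for struct_size():

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define BLOCK_LEN 8				/* NSP_SFF_EEPROM_BLOCK_LEN */
#define ROUND_UP(x, a) ((((x) + (a) - 1) / (a)) * (a))

struct eeprom_buf {
	uint8_t  metalen;
	uint16_t length;
	uint16_t offset;
	uint16_t readlen;
	uint8_t  eth_index;
	uint8_t  data[];
} __attribute__((packed));

int main(void)
{
	size_t hdr = offsetof(struct eeprom_buf, data);	/* 8-byte header */
	size_t bufsz = hdr + ROUND_UP(95, BLOCK_LEN);	/* 8 + 96 = 104 */

	assert(hdr % BLOCK_LEN == 0);	/* mirrors the BUILD_BUG_ON() */
	assert(hdr / BLOCK_LEN == 1);	/* value written into ->metalen */
	assert(bufsz == 104);
	return 0;
}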
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index bd9c358c646f..22ee6985ee1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -22,6 +22,9 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
{
@@ -43,6 +46,11 @@ static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 27;
}
+static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 28;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 89d17399fb5a..da138edddd32 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1368,7 +1368,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
if (!is_valid_ether_addr(ndev->dev_addr)) {
const char *macaddr = of_get_mac_address(np);
- if (macaddr)
+ if (!IS_ERR(macaddr))
memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
}
if (!is_valid_ether_addr(ndev->dev_addr))
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5bf46310f60..5ffaee9f53b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1355,7 +1355,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
const int nh_off = skb_network_offset(skb);
const int nh_len = skb_network_header_len(skb);
const int nfrags = skb_shinfo(skb)->nr_frags;
- int cs_size, i, fill, hdr, cpyhdr, evt;
+ int cs_size, i, fill, hdr, evt;
dma_addr_t csdma;
fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
@@ -1396,7 +1396,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
fill++;
/* Copy the result into the TCP packet */
- cpyhdr = fill;
CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
XCT_FUN_LLEN(2) | XCT_FUN_SE;
CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
@@ -1839,7 +1838,7 @@ static void __exit pasemi_mac_cleanup_module(void)
pci_unregister_driver(&pasemi_mac_driver);
}
-int pasemi_mac_init_module(void)
+static int pasemi_mac_init_module(void)
{
int err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 127c89b22ef0..c5e96ce20f59 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -496,6 +496,9 @@ enum qed_mf_mode_bit {
/* Allow DSCP to TC mapping */
QED_MF_DSCP_TO_TC_MAP,
+
+ /* Do not insert a vlan tag with id 0 */
+ QED_MF_DONT_ADD_VLAN0_TAG,
};
enum qed_ufp_mode {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 69966dfc6e3d..5c6a276f69ac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -204,9 +204,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
- /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
- if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
- test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+ if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits))
p_data->arr[type].dont_add_vlan0 = true;
/* QM reconf data */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 866cdc86a3f2..fccdb06fc5c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3140,12 +3140,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_UFP_SPECIFIC) |
- BIT(QED_MF_8021Q_TAGGING);
+ BIT(QED_MF_8021Q_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
break;
case NVM_CFG1_GLOB_MF_MODE_BD:
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
- BIT(QED_MF_8021AD_TAGGING);
+ BIT(QED_MF_8021AD_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 63a78162cfaf..92fe226980fd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -498,8 +498,7 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 4555c0b161ef..8911a97ab0ca 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -652,9 +652,9 @@ static void qede_get_drvinfo(struct net_device *ndev,
{
char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
struct qede_dev *edev = netdev_priv(ndev);
+ char mbi[ETHTOOL_FWVERS_LEN];
strlcpy(info->driver, "qede", sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -668,13 +668,27 @@ static void qede_get_drvinfo(struct net_device *ndev,
(edev->dev_info.common.mfw_rev >> 8) & 0xFF,
edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
- sizeof(info->fw_version)) {
+ if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) <
+ sizeof(info->version))
+ snprintf(info->version, sizeof(info->version),
+ "%s [storm %s]", DRV_MODULE_VERSION, storm);
+ else
+ snprintf(info->version, sizeof(info->version),
+ "%s %s", DRV_MODULE_VERSION, storm);
+
+ if (edev->dev_info.common.mbi_version) {
+ snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);
snprintf(info->fw_version, sizeof(info->fw_version),
- "mfw %s storm %s", mfw, storm);
+ "mbi %s [mfw %s]", mbi, mfw);
} else {
snprintf(info->fw_version, sizeof(info->fw_version),
- "%s %s", mfw, storm);
+ "mfw %s", mfw);
}
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 6f7e3622c6b4..0ae28f0d2523 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1657,12 +1657,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
- if (skb->xmit_more)
+ if (netdev_xmit_more())
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq);
@@ -1688,8 +1688,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct qede_dev *edev = netdev_priv(dev);
int total_txq;
@@ -1697,7 +1696,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
return QEDE_TSS_COUNT(edev) ?
- fallback(dev, skb, NULL) % total_txq : 0;
+ netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
}
/* 8B udp header + 8B base tunnel header + 32B option length */
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 97f92953bdb9..b28360bc2255 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -966,7 +966,7 @@ qca_spi_probe(struct spi_device *spi)
mac = of_get_mac_address(spi->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index db6068cd7a1f..590616846cd1 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -351,7 +351,7 @@ static int qca_uart_probe(struct serdev_device *serdev)
mac = of_get_mac_address(serdev->dev.of_node);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(qca->net_dev->dev_addr, mac);
if (!is_valid_ether_addr(qca->net_dev->dev_addr)) {
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 04aa592f35c3..ad335bca3273 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
- if (!skb->xmit_more || netif_queue_stopped(dev))
+ if (!netdev_xmit_more() || netif_queue_stopped(dev))
iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index ed651dde6ef9..549be1c76a89 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -491,10 +491,6 @@ enum rtl_register_content {
PCIDAC = (1 << 4),
PCIMulRW = (1 << 3),
#define INTT_MASK GENMASK(1, 0)
- INTT_0 = 0x0000, // 8168
- INTT_1 = 0x0001, // 8168
- INTT_2 = 0x0002, // 8168
- INTT_3 = 0x0003, // 8168
/* rtl8169_PHYstatus */
TBI_Enable = 0x80,
@@ -703,6 +699,8 @@ struct rtl8169_private {
u32 ocp_base;
};
+typedef void (*rtl_generic_fct)(struct rtl8169_private *tp);
+
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param_named(debug, debug.msg_enable, int, 0);
@@ -777,9 +775,9 @@ static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
int i;
for (i = 0; i < n; i++) {
- delay(d);
if (c->check(tp) == high)
return true;
+ delay(d);
}
netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
c->msg, !high, n, d);
@@ -1067,8 +1065,8 @@ DECLARE_RTL_COND(rtl_eriar_cond)
return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
}
-static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val, int type)
+static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
{
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(tp, ERIDR, val);
@@ -1077,7 +1075,13 @@ static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
-static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val)
+{
+ _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
+}
+
+static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
@@ -1085,13 +1089,30 @@ static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
RTL_R32(tp, ERIDR) : ~0;
}
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
+{
+ return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
+}
+
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
- u32 m, int type)
+ u32 m)
{
u32 val;
- val = rtl_eri_read(tp, addr, type);
- rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
+ val = rtl_eri_read(tp, addr);
+ rtl_eri_write(tp, addr, mask, (val & ~m) | p);
+}
+
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 p)
+{
+ rtl_w0w1_eri(tp, addr, mask, p, 0);
+}
+
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 m)
+{
+ rtl_w0w1_eri(tp, addr, mask, 0, m);
}
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
@@ -1103,7 +1124,7 @@ static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
- return rtl_eri_read(tp, reg, ERIAR_OOB);
+ return _rtl_eri_read(tp, reg, ERIAR_OOB);
}
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
@@ -1117,13 +1138,13 @@ static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
u32 data)
{
- rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
- data, ERIAR_OOB);
+ _rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT,
+ data, ERIAR_OOB);
}
static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd);
r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001);
}
@@ -1259,19 +1280,10 @@ static bool r8168_check_dash(struct rtl8169_private *tp)
}
}
-struct exgmac_reg {
- u16 addr;
- u16 mask;
- u32 val;
-};
-
-static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
- const struct exgmac_reg *r, int len)
+static void rtl_reset_packet_filter(struct rtl8169_private *tp)
{
- while (len-- > 0) {
- rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
- r++;
- }
+ rtl_eri_clear_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
+ rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_0001, BIT(0));
}
DECLARE_RTL_COND(rtl_efusear_cond)
@@ -1327,48 +1339,31 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (phydev->speed == SPEED_1000) {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
} else if (phydev->speed == SPEED_100) {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
} else {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
}
- /* Reset packet filter */
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
- ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
- ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
if (phydev->speed == SPEED_1000) {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005);
} else {
- rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
if (phydev->speed == SPEED_10) {
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
- ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060a);
} else {
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
}
}
@@ -1409,19 +1404,11 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
tmp = ARRAY_SIZE(cfg) - 1;
if (wolopts & WAKE_MAGIC)
- rtl_w0w1_eri(tp,
- 0x0dc,
- ERIAR_MASK_0100,
- MagicPacket_v2,
- 0x0000,
- ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0x0dc, ERIAR_MASK_0100,
+ MagicPacket_v2);
else
- rtl_w0w1_eri(tp,
- 0x0dc,
- ERIAR_MASK_0100,
- 0x0000,
- MagicPacket_v2,
- ERIAR_EXGMAC);
+ rtl_eri_clear_bits(tp, 0x0dc, ERIAR_MASK_0100,
+ MagicPacket_v2);
break;
default:
tmp = ARRAY_SIZE(cfg);
@@ -2293,8 +2280,8 @@ struct phy_reg {
u16 val;
};
-static void rtl_writephy_batch(struct rtl8169_private *tp,
- const struct phy_reg *regs, int len)
+static void __rtl_writephy_batch(struct rtl8169_private *tp,
+ const struct phy_reg *regs, int len)
{
while (len-- > 0) {
rtl_writephy(tp, regs->reg, regs->val);
@@ -2302,6 +2289,8 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
}
}
+#define rtl_writephy_batch(tp, a) __rtl_writephy_batch(tp, a, ARRAY_SIZE(a))
+
#define PHY_READ 0x00000000
#define PHY_DATA_OR 0x10000000
#define PHY_DATA_AND 0x20000000
@@ -2564,7 +2553,11 @@ static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
static void rtl8168_config_eee_mac(struct rtl8169_private *tp)
{
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0003, 0x0000, ERIAR_EXGMAC);
+ /* Adjust EEE LED frequency */
+ if (tp->mac_version != RTL_GIGA_MAC_VER_38)
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+
+ rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_1111, 0x0003);
}
static void rtl8168f_config_eee_phy(struct rtl8169_private *tp)
@@ -2653,7 +2646,7 @@ static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
{ 0x00, 0x9200 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
@@ -2664,7 +2657,7 @@ static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
@@ -2722,7 +2715,7 @@ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl8169scd_hw_phy_config_quirk(tp);
}
@@ -2777,7 +2770,7 @@ static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
@@ -2790,7 +2783,7 @@ static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0001);
rtl_patchphy(tp, 0x16, 1 << 0);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
@@ -2801,7 +2794,7 @@ static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2814,7 +2807,7 @@ static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
@@ -2829,7 +2822,7 @@ static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
rtl_patchphy(tp, 0x14, 1 << 5);
rtl_patchphy(tp, 0x0d, 1 << 5);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
@@ -2854,7 +2847,7 @@ static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x09, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x14, 1 << 5);
rtl_patchphy(tp, 0x0d, 1 << 5);
@@ -2881,7 +2874,7 @@ static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x16, 1 << 0);
rtl_patchphy(tp, 0x14, 1 << 5);
@@ -2903,7 +2896,7 @@ static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x16, 1 << 0);
rtl_patchphy(tp, 0x14, 1 << 5);
@@ -2959,7 +2952,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
- rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+ rtl_writephy_batch(tp, phy_reg_init_0);
/*
* Rx Error Issue
@@ -2980,7 +2973,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
};
int val;
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
val = rtl_readphy(tp, 0x0d);
@@ -3006,7 +2999,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x06, 0x6662 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
/* RSET couple improve */
@@ -3070,7 +3063,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x0d, 0xf880 }
};
- rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
+ rtl_writephy_batch(tp, phy_reg_init_0);
if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
@@ -3084,7 +3077,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
};
int val;
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
val = rtl_readphy(tp, 0x0d);
if ((val & 0x00ff) != 0x006c) {
@@ -3109,7 +3102,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x06, 0x2642 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
/* Fine tune PLL performance */
@@ -3187,7 +3180,7 @@ static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
@@ -3202,7 +3195,7 @@ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 }
};
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl_patchphy(tp, 0x0d, 1 << 5);
}
@@ -3238,7 +3231,7 @@ static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
/* DCO enable for 10M IDLE Power */
rtl_writephy(tp, 0x1f, 0x0007);
@@ -3286,14 +3279,11 @@ static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
addr[2] | (addr[3] << 8),
addr[4] | (addr[5] << 8)
};
- const struct exgmac_reg e[] = {
- { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
- { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
- { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
- { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
- };
- rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
+ rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
+ rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
+ rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
+ rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
}
static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
@@ -3327,7 +3317,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
/* For 4-corner performance improve */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3436,7 +3426,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
rtl8168f_hw_phy_config(tp);
@@ -3502,7 +3492,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
/* Modify green table for giga */
rtl_writephy(tp, 0x1f, 0x0005);
@@ -3922,7 +3912,7 @@ static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
rtl_patchphy(tp, 0x19, 1 << 13);
rtl_patchphy(tp, 0x10, 1 << 15);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
@@ -3948,7 +3938,7 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_writephy_batch(tp, phy_reg_init);
}
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
@@ -3961,7 +3951,7 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
/* EEE setting */
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
@@ -3984,139 +3974,73 @@ static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
- rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
+ rtl_writephy_batch(tp, phy_reg_init);
- rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000);
}
static void rtl_hw_phy_config(struct net_device *dev)
{
+ static const rtl_generic_fct phy_configs[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_01] = NULL,
+ [RTL_GIGA_MAC_VER_02] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_03] = rtl8169s_hw_phy_config,
+ [RTL_GIGA_MAC_VER_04] = rtl8169sb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_05] = rtl8169scd_hw_phy_config,
+ [RTL_GIGA_MAC_VER_06] = rtl8169sce_hw_phy_config,
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_08] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_09] = rtl8102e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl8168bb_hw_phy_config,
+ [RTL_GIGA_MAC_VER_12] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl8168bef_hw_phy_config,
+ [RTL_GIGA_MAC_VER_18] = rtl8168cp_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_19] = rtl8168c_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_20] = rtl8168c_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_21] = rtl8168c_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_22] = rtl8168c_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_23] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
+ [RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
+ [RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_31] = NULL,
+ [RTL_GIGA_MAC_VER_32] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_33] = rtl8168e_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_34] = rtl8168e_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_35] = rtl8168f_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_36] = rtl8168f_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_37] = rtl8402_hw_phy_config,
+ [RTL_GIGA_MAC_VER_38] = rtl8411_hw_phy_config,
+ [RTL_GIGA_MAC_VER_39] = rtl8106e_hw_phy_config,
+ [RTL_GIGA_MAC_VER_40] = rtl8168g_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_41] = NULL,
+ [RTL_GIGA_MAC_VER_42] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_43] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_44] = rtl8168g_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_45] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_46] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_47] = rtl8168h_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_48] = rtl8168h_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_49] = rtl8168ep_1_hw_phy_config,
+ [RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
+ [RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
+ };
struct rtl8169_private *tp = netdev_priv(dev);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01:
- break;
- case RTL_GIGA_MAC_VER_02:
- case RTL_GIGA_MAC_VER_03:
- rtl8169s_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_04:
- rtl8169sb_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_05:
- rtl8169scd_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_06:
- rtl8169sce_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_07:
- case RTL_GIGA_MAC_VER_08:
- case RTL_GIGA_MAC_VER_09:
- rtl8102e_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_11:
- rtl8168bb_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_12:
- rtl8168bef_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_17:
- rtl8168bef_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_18:
- rtl8168cp_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_19:
- rtl8168c_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_20:
- rtl8168c_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_21:
- rtl8168c_3_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_22:
- rtl8168c_4_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- rtl8168cp_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_25:
- rtl8168d_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_26:
- rtl8168d_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_27:
- rtl8168d_3_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_28:
- rtl8168d_4_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- rtl8105e_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_31:
- /* None. */
- break;
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- rtl8168e_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_34:
- rtl8168e_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_35:
- rtl8168f_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_36:
- rtl8168f_2_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_37:
- rtl8402_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_38:
- rtl8411_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_39:
- rtl8106e_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_40:
- rtl8168g_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- rtl8168g_2_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_47:
- rtl8168h_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_48:
- rtl8168h_2_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_49:
- rtl8168ep_1_hw_phy_config(tp);
- break;
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- rtl8168ep_2_hw_phy_config(tp);
- break;
-
- case RTL_GIGA_MAC_VER_41:
- default:
- break;
- }
+ if (phy_configs[tp->mac_version])
+ phy_configs[tp->mac_version](tp);
}
static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
@@ -4147,14 +4071,6 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
phy_speed_up(tp->phydev);
genphy_soft_reset(tp->phydev);
-
- /* It was reported that several chips end up with 10MBit/Half on a
- * 1GBit link after resuming from S3. For whatever reason the PHY on
- * these chips doesn't properly start a renegotiation when soft-reset.
- * Explicitly requesting a renegotiation fixes this.
- */
- if (tp->phydev->autoneg == AUTONEG_ENABLE)
- phy_restart_aneg(tp->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4283,8 +4199,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
- rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
- 0xfc000000, ERIAR_EXGMAC);
+ rtl_eri_clear_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
}
@@ -4312,8 +4227,7 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
- 0x00000000, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000);
break;
}
@@ -4703,6 +4617,8 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_tx_desc_registers(tp);
rtl_lock_config_regs(tp);
+ /* disable interrupt coalescing */
+ RTL_W16(tp, IntrMitigate, 0x0000);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -4735,12 +4651,6 @@ static void rtl_hw_start_8169(struct rtl8169_private *tp)
rtl8169_set_magic_reg(tp, tp->mac_version);
- /*
- * Undocumented corner. Supposedly:
- * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
- */
- RTL_W16(tp, IntrMitigate, 0x0000);
-
RTL_W32(tp, RxMissed, 0);
}
@@ -4801,8 +4711,8 @@ struct ephy_info {
u16 bits;
};
-static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
- int len)
+static void __rtl_ephy_init(struct rtl8169_private *tp,
+ const struct ephy_info *e, int len)
{
u16 w;
@@ -4813,6 +4723,8 @@ static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
}
}
+#define rtl_ephy_init(tp, a) __rtl_ephy_init(tp, a, ARRAY_SIZE(a))
+
static void rtl_disable_clock_request(struct rtl8169_private *tp)
{
pcie_capability_clear_word(tp->pci_dev, PCI_EXP_LNKCTL,
@@ -4844,6 +4756,24 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
udelay(10);
}
+static void rtl_set_fifo_size(struct rtl8169_private *tp, u16 rx_stat,
+ u16 tx_stat, u16 rx_dyn, u16 tx_dyn)
+{
+ /* Usage of dynamic vs. static FIFO is controlled by bit
+ * TXCFG_AUTO_FIFO. Exact meaning of FIFO values isn't known.
+ */
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, (rx_stat << 16) | rx_dyn);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, (tx_stat << 16) | tx_dyn);
+}
+
+static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
+ u8 low, u8 high)
+{
+ /* FIFO thresholds for pause flow control */
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, low);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, high);
+}
+
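The two helpers just pack a static and a dynamic threshold into one 32-bit ERI word. A quick standalone check (editor's illustration, not driver code) that the arguments used further down for the 8168e/8168f paths, 0x10/0x10/0x02/0x06, reproduce the raw 0x00100002 and 0x00100006 values the old code wrote directly:

#include <assert.h>
#include <stdint.h>

/* Same packing as rtl_set_fifo_size(): static threshold in the high
 * half-word, dynamic threshold in the low half-word.
 */
static uint32_t eri_fifo_word(uint16_t stat, uint16_t dyn)
{
	return ((uint32_t)stat << 16) | dyn;
}

int main(void)
{
	assert(eri_fifo_word(0x10, 0x02) == 0x00100002);	/* reg 0xc8 (RX) */
	assert(eri_fifo_word(0x10, 0x06) == 0x00100006);	/* reg 0xe8 (TX) */
	return 0;
}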
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
@@ -4893,7 +4823,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ rtl_ephy_init(tp, e_info_8168cp);
__rtl_hw_start_8168cp(tp);
}
@@ -4941,7 +4871,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ rtl_ephy_init(tp, e_info_8168c_1);
__rtl_hw_start_8168cp(tp);
}
@@ -4955,7 +4885,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ rtl_ephy_init(tp, e_info_8168c_2);
__rtl_hw_start_8168cp(tp);
}
@@ -5013,7 +4943,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));
+ rtl_ephy_init(tp, e_info_8168d_4);
rtl_enable_clock_request(tp);
}
@@ -5038,7 +4968,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+ rtl_ephy_init(tp, e_info_8168e_1);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
@@ -5063,19 +4993,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_set_def_aspm_entry_latency(tp);
- rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ rtl_ephy_init(tp, e_info_8168e_2);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060);
+ rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
+ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
@@ -5083,9 +5012,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
-
rtl8168_config_eee_mac(tp);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
@@ -5101,16 +5027,14 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
+ rtl_reset_packet_filter(tp);
+ rtl_eri_set_bits(tp, 0x1b0, ERIAR_MASK_0001, BIT(4));
+ rtl_eri_set_bits(tp, 0x1d0, ERIAR_MASK_0001, BIT(4));
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
@@ -5135,12 +5059,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
-
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
+ rtl_ephy_init(tp, e_info_8168f_1);
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00);
}
static void rtl_hw_start_8411(struct rtl8169_private *tp)
@@ -5155,39 +5076,33 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
rtl_pcie_state_l2l3_disable(tp);
- rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00);
}
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
+ rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5205,7 +5120,7 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
+ rtl_ephy_init(tp, e_info_8168g_1);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5223,7 +5138,7 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
- rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
+ rtl_ephy_init(tp, e_info_8168g_2);
}
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
@@ -5240,7 +5155,7 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+ rtl_ephy_init(tp, e_info_8411_2);
rtl_hw_aspm_clkreq_enable(tp, true);
}
@@ -5259,34 +5174,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
+ rtl_ephy_init(tp, e_info_8168h_1);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x38, 0x48);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0xdc, ERIAR_MASK_1111, BIT(4));
- rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f00);
- rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
@@ -5295,7 +5204,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
- rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+ rtl_eri_clear_bits(tp, 0x1b0, ERIAR_MASK_0011, BIT(12));
rtl_pcie_state_l2l3_disable(tp);
@@ -5345,34 +5254,28 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x08, 0x10, 0x02, 0x06);
+ rtl8168g_set_pause_thresholds(tp, 0x2f, 0x5f);
rtl_set_def_aspm_entry_latency(tp);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_reset_packet_filter(tp);
- rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC);
+ rtl_eri_set_bits(tp, 0xd4, ERIAR_MASK_1111, 0x1f80);
- rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
RTL_W8(tp, MaxTxPacketSize, EarlySize);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
-
- /* Adjust EEE LED frequency */
- RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
rtl8168_config_eee_mac(tp);
- rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
+ rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
@@ -5391,7 +5294,7 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
+ rtl_ephy_init(tp, e_info_8168ep_1);
rtl_hw_start_8168ep(tp);
@@ -5408,7 +5311,7 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
+ rtl_ephy_init(tp, e_info_8168ep_2);
rtl_hw_start_8168ep(tp);
@@ -5430,7 +5333,7 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
- rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
+ rtl_ephy_init(tp, e_info_8168ep_3);
rtl_hw_start_8168ep(tp);
@@ -5453,128 +5356,6 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
-static void rtl_hw_start_8168(struct rtl8169_private *tp)
-{
- RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
-
- tp->cp_cmd &= ~INTT_MASK;
- tp->cp_cmd |= PktCntrDisable | INTT_1;
- RTL_W16(tp, CPlusCmd, tp->cp_cmd);
-
- RTL_W16(tp, IntrMitigate, 0x5100);
-
- /* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
- tp->irq_mask |= RxFIFOOver;
- tp->irq_mask &= ~RxOverflow;
- }
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_11:
- rtl_hw_start_8168bb(tp);
- break;
-
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_17:
- rtl_hw_start_8168bef(tp);
- break;
-
- case RTL_GIGA_MAC_VER_18:
- rtl_hw_start_8168cp_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_19:
- rtl_hw_start_8168c_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_20:
- rtl_hw_start_8168c_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_21:
- rtl_hw_start_8168c_3(tp);
- break;
-
- case RTL_GIGA_MAC_VER_22:
- rtl_hw_start_8168c_4(tp);
- break;
-
- case RTL_GIGA_MAC_VER_23:
- rtl_hw_start_8168cp_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_24:
- rtl_hw_start_8168cp_3(tp);
- break;
-
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_27:
- rtl_hw_start_8168d(tp);
- break;
-
- case RTL_GIGA_MAC_VER_28:
- rtl_hw_start_8168d_4(tp);
- break;
-
- case RTL_GIGA_MAC_VER_31:
- rtl_hw_start_8168dp(tp);
- break;
-
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- rtl_hw_start_8168e_1(tp);
- break;
- case RTL_GIGA_MAC_VER_34:
- rtl_hw_start_8168e_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- rtl_hw_start_8168f_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_38:
- rtl_hw_start_8411(tp);
- break;
-
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- rtl_hw_start_8168g_1(tp);
- break;
- case RTL_GIGA_MAC_VER_42:
- rtl_hw_start_8168g_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_44:
- rtl_hw_start_8411_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- rtl_hw_start_8168h_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_49:
- rtl_hw_start_8168ep_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_50:
- rtl_hw_start_8168ep_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_51:
- rtl_hw_start_8168ep_3(tp);
- break;
-
- default:
- netif_err(tp, drv, tp->dev,
- "unknown chipset (mac_version = %d)\n",
- tp->mac_version);
- break;
- }
-}
-
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
static const struct ephy_info e_info_8102e_1[] = {
@@ -5603,7 +5384,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(tp, Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ rtl_ephy_init(tp, e_info_8102e_1);
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5645,7 +5426,7 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
- rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ rtl_ephy_init(tp, e_info_8105e_1);
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5670,17 +5451,15 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
+ rtl_ephy_init(tp, e_info_8402);
rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
- rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+ rtl_set_fifo_size(tp, 0x00, 0x00, 0x02, 0x06);
+ rtl_reset_packet_filter(tp);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
+ rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00);
rtl_pcie_state_l2l3_disable(tp);
}
@@ -5700,6 +5479,73 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, true);
}
+static void rtl_hw_config(struct rtl8169_private *tp)
+{
+ static const rtl_generic_fct hw_configs[] = {
+ [RTL_GIGA_MAC_VER_07] = rtl_hw_start_8102e_1,
+ [RTL_GIGA_MAC_VER_08] = rtl_hw_start_8102e_3,
+ [RTL_GIGA_MAC_VER_09] = rtl_hw_start_8102e_2,
+ [RTL_GIGA_MAC_VER_10] = NULL,
+ [RTL_GIGA_MAC_VER_11] = rtl_hw_start_8168bb,
+ [RTL_GIGA_MAC_VER_12] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_13] = NULL,
+ [RTL_GIGA_MAC_VER_14] = NULL,
+ [RTL_GIGA_MAC_VER_15] = NULL,
+ [RTL_GIGA_MAC_VER_16] = NULL,
+ [RTL_GIGA_MAC_VER_17] = rtl_hw_start_8168bef,
+ [RTL_GIGA_MAC_VER_18] = rtl_hw_start_8168cp_1,
+ [RTL_GIGA_MAC_VER_19] = rtl_hw_start_8168c_1,
+ [RTL_GIGA_MAC_VER_20] = rtl_hw_start_8168c_2,
+ [RTL_GIGA_MAC_VER_21] = rtl_hw_start_8168c_3,
+ [RTL_GIGA_MAC_VER_22] = rtl_hw_start_8168c_4,
+ [RTL_GIGA_MAC_VER_23] = rtl_hw_start_8168cp_2,
+ [RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
+ [RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
+ [RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
+ [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
+ [RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
+ [RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
+ [RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
+ [RTL_GIGA_MAC_VER_31] = rtl_hw_start_8168dp,
+ [RTL_GIGA_MAC_VER_32] = rtl_hw_start_8168e_1,
+ [RTL_GIGA_MAC_VER_33] = rtl_hw_start_8168e_1,
+ [RTL_GIGA_MAC_VER_34] = rtl_hw_start_8168e_2,
+ [RTL_GIGA_MAC_VER_35] = rtl_hw_start_8168f_1,
+ [RTL_GIGA_MAC_VER_36] = rtl_hw_start_8168f_1,
+ [RTL_GIGA_MAC_VER_37] = rtl_hw_start_8402,
+ [RTL_GIGA_MAC_VER_38] = rtl_hw_start_8411,
+ [RTL_GIGA_MAC_VER_39] = rtl_hw_start_8106,
+ [RTL_GIGA_MAC_VER_40] = rtl_hw_start_8168g_1,
+ [RTL_GIGA_MAC_VER_41] = rtl_hw_start_8168g_1,
+ [RTL_GIGA_MAC_VER_42] = rtl_hw_start_8168g_2,
+ [RTL_GIGA_MAC_VER_43] = rtl_hw_start_8168g_2,
+ [RTL_GIGA_MAC_VER_44] = rtl_hw_start_8411_2,
+ [RTL_GIGA_MAC_VER_45] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_46] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_47] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_48] = rtl_hw_start_8168h_1,
+ [RTL_GIGA_MAC_VER_49] = rtl_hw_start_8168ep_1,
+ [RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
+ [RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
+ };
+
+ if (hw_configs[tp->mac_version])
+ hw_configs[tp->mac_version](tp);
+}
+
+static void rtl_hw_start_8168(struct rtl8169_private *tp)
+{
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
+
+ /* Workaround for RxFIFO overflow. */
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
+ tp->irq_mask |= RxFIFOOver;
+ tp->irq_mask &= ~RxOverflow;
+ }
+
+ rtl_hw_config(tp);
+}
+
static void rtl_hw_start_8101(struct rtl8169_private *tp)
{
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
@@ -5715,43 +5561,7 @@ static void rtl_hw_start_8101(struct rtl8169_private *tp)
tp->cp_cmd &= CPCMD_QUIRK_MASK;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_07:
- rtl_hw_start_8102e_1(tp);
- break;
-
- case RTL_GIGA_MAC_VER_08:
- rtl_hw_start_8102e_3(tp);
- break;
-
- case RTL_GIGA_MAC_VER_09:
- rtl_hw_start_8102e_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_29:
- rtl_hw_start_8105e_1(tp);
- break;
- case RTL_GIGA_MAC_VER_30:
- rtl_hw_start_8105e_2(tp);
- break;
-
- case RTL_GIGA_MAC_VER_37:
- rtl_hw_start_8402(tp);
- break;
-
- case RTL_GIGA_MAC_VER_39:
- rtl_hw_start_8106(tp);
- break;
- case RTL_GIGA_MAC_VER_43:
- rtl_hw_start_8168g_2(tp);
- break;
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- rtl_hw_start_8168h_1(tp);
- break;
- }
-
- RTL_W16(tp, IntrMitigate, 0x0000);
+ rtl_hw_config(tp);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -6268,7 +6078,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
*/
smp_mb();
if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
- netif_wake_queue(dev);
+ netif_start_queue(dev);
}
return NETDEV_TX_OK;
@@ -6543,10 +6353,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
}
- if (status & (RTL_EVENT_NAPI | LinkChg)) {
- rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
- }
+ rtl_irq_disable(tp);
+ napi_schedule_irqoff(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -6645,8 +6453,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
- /* Ensure to advertise everything, incl. pause */
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_support_asym_pause(phydev);
phy_attached_info(phydev);
@@ -7123,13 +6930,13 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_35 ... RTL_GIGA_MAC_VER_38:
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
- value = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC);
+ value = rtl_eri_read(tp, 0xe0);
mac_addr[0] = (value >> 0) & 0xff;
mac_addr[1] = (value >> 8) & 0xff;
mac_addr[2] = (value >> 16) & 0xff;
mac_addr[3] = (value >> 24) & 0xff;
- value = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC);
+ value = rtl_eri_read(tp, 0xe4);
mac_addr[4] = (value >> 0) & 0xff;
mac_addr[5] = (value >> 8) & 0xff;
break;
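
Editor's note on the r8169 hunks above: the new rtl_hw_config() replaces the old per-family switch statements in rtl_hw_start_8168()/rtl_hw_start_8101() with a single function-pointer table indexed by mac_version, with NULL entries for versions that need no extra setup. The standalone sketch below shows that dispatch-table pattern in isolation; the names (chip_ver, chip_config_fn, config_table, config_chip_a/b, hw_config) are illustrative only and are not part of the driver.

#include <stdio.h>

/* Illustrative chip identifiers, mirroring the role of RTL_GIGA_MAC_VER_*. */
enum chip_ver { CHIP_VER_A, CHIP_VER_B, CHIP_VER_C, CHIP_VER_MAX };

typedef void (*chip_config_fn)(void);

static void config_chip_a(void) { puts("configure chip A"); }
static void config_chip_b(void) { puts("configure chip B"); }

/* Sparse table: versions without a handler stay NULL, like the
 * RTL_GIGA_MAC_VER_10..16 entries in rtl_hw_config(). */
static const chip_config_fn config_table[CHIP_VER_MAX] = {
	[CHIP_VER_A] = config_chip_a,
	[CHIP_VER_B] = config_chip_b,
};

static void hw_config(enum chip_ver ver)
{
	/* NULL check mirrors rtl_hw_config(): no handler means no-op. */
	if (ver < CHIP_VER_MAX && config_table[ver])
		config_table[ver]();
}

int main(void)
{
	hw_config(CHIP_VER_B);	/* prints "configure chip B" */
	hw_config(CHIP_VER_C);	/* no handler, silently skipped */
	return 0;
}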
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 316b47741d3f..ef8f08931fe8 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -111,7 +111,7 @@ static void ravb_set_buffer_align(struct sk_buff *skb)
*/
static void ravb_read_mac_address(struct net_device *ndev, const u8 *mac)
{
- if (mac) {
+ if (!IS_ERR(mac)) {
ether_addr_copy(ndev->dev_addr, mac);
} else {
u32 mahr = ravb_read(ndev, MAHR);
@@ -1607,8 +1607,7 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/* If skb needs TX timestamp, it is handled in network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
@@ -1961,6 +1960,13 @@ static void ravb_set_config_mode(struct net_device *ndev)
}
}
+static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
+ { .soc_id = "r8a774c0" },
+ { .soc_id = "r8a77990" },
+ { .soc_id = "r8a77995" },
+ { /* sentinel */ }
+};
+
/* Set tx and rx clock internal delay modes */
static void ravb_set_delay_mode(struct net_device *ndev)
{
@@ -1972,8 +1978,12 @@ static void ravb_set_delay_mode(struct net_device *ndev)
set |= APSR_DM_RDM;
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
- set |= APSR_DM_TDM;
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
+ "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
+ phy_modes(priv->phy_interface)))
+ set |= APSR_DM_TDM;
+ }
ravb_modify(ndev, APSR, APSR_DM, set);
}
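
Editor's note: the ravb hunk above, and the sh_eth, sxgbe, sni_ave and stmmac hunks that follow, are all conversions for the new of_get_mac_address() contract, which now reports failure with an ERR_PTR() value rather than NULL, so the checks change from `if (mac)` to `if (!IS_ERR(mac))`. The self-contained sketch below re-creates that convention in userspace to show why a NULL test no longer works; ERR_PTR/IS_ERR/PTR_ERR here are simplified stand-ins for the <linux/err.h> helpers, and get_mac_address() is a hypothetical stand-in for of_get_mac_address().

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Simplified userspace re-creation of the kernel's ERR_PTR convention:
 * small negative error codes are encoded at the top of the pointer space. */
#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in: returns a MAC address or ERR_PTR(-ENODEV). */
static const uint8_t *get_mac_address(int have_dt_property)
{
	static const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

	return have_dt_property ? mac : ERR_PTR(-19 /* -ENODEV */);
}

int main(void)
{
	uint8_t dev_addr[6];
	const uint8_t *mac = get_mac_address(0);

	/* Same shape as the driver fix-ups: test IS_ERR(), not NULL. */
	if (!IS_ERR(mac))
		memcpy(dev_addr, mac, sizeof(dev_addr));
	else
		printf("no DT MAC, error %ld; fall back to a random address\n",
		       PTR_ERR(mac));
	return 0;
}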
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ed30aebdb941..7c4e282242d5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3192,7 +3192,7 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
pdata->phy_interface = ret;
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
pdata->no_ether_link =
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index a71c900ca04f..7ae6c124bfe9 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2207,6 +2207,15 @@ static int rocker_router_fib_event(struct notifier_block *nb,
switch (event) {
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
+ if (info->family == AF_INET) {
+ struct fib_entry_notifier_info *fen_info = ptr;
+
+ if (fen_info->fi->fib_nh_is_v6) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ return notifier_from_errno(-EINVAL);
+ }
+ }
+
memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
/* Take reference on fib_info to prevent it from being
* freed while work is queued. Release it afterwards.
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index fa296a7c255d..30a49802fb51 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2288,11 +2288,11 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
nh = fi->fib_nh;
nh_on_port = (fi->fib_dev == ofdpa_port->dev);
- has_gw = !!nh->nh_gw;
+ has_gw = !!nh->fib_nh_gw4;
if (has_gw && nh_on_port) {
err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
- nh->nh_gw, &index);
+ nh->fib_nh_gw4, &index);
if (err)
return err;
@@ -2749,7 +2749,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
fen_info->tb_id, 0);
if (err)
return err;
- fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return 0;
}
@@ -2764,7 +2764,7 @@ static int ofdpa_fib4_del(struct rocker *rocker,
ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
if (!ofdpa_port)
return 0;
- fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
@@ -2791,7 +2791,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
rocker);
if (!ofdpa_port)
continue;
- flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index fbd00cb0cb7d..d2bc9412ba03 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -124,7 +124,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
/* Get MAC address if available (DT) */
- if (mac)
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(priv->dev->dev_addr, mac);
/* Get the TX/RX IRQ numbers */
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 3409bbf5b19f..c5059f456f37 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
@@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
ef4_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = netdev_xmit_more();
}
tx_queue->tx_packets++;
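
Editor's note: the sfc/falcon, sfc and xlgmac hunks in this area replace reads of skb->xmit_more with netdev_xmit_more(), matching the move of xmit_more into per-cpu state mentioned in the pull request. A minimal kernel-style sketch of the resulting ndo_start_xmit tail follows; it is illustrative only and not the sfc code, and my_push_to_hw() is a hypothetical doorbell helper.

#include <linux/netdevice.h>

/* Sketch only: generic transmit tail after the xmit_more conversion. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	bool xmit_more = netdev_xmit_more();	/* was: skb->xmit_more */

	/* ... map the skb and fill hardware descriptors here ... */

	/* Ring the doorbell only when the stack has no further packets
	 * batched for this CPU, or when the queue is stopping anyway. */
	if (!xmit_more || netif_xmit_stopped(txq))
		my_push_to_hw(dev);		/* hypothetical helper */

	return NETDEV_TX_OK;
}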
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9382bb0b4d5a..a4bbfebe3d64 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -342,6 +342,7 @@ static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
break;
default:
WARN_ON(1);
+ /* Fall through */
case MC_CMD_FCNTL_OFF:
link_state->fc = 0;
break;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 06c8f282263f..e182055ec2eb 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
next = skb->next;
skb->next = NULL;
- if (next)
- skb->xmit_more = true;
efx_enqueue_skb(tx_queue, skb);
skb = next;
}
@@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
- bool xmit_more = skb->xmit_more;
+ bool xmit_more = netdev_xmit_more();
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
@@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (rc)
goto err;
#ifdef EFX_USE_PIO
- } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+ } else if (skb_len <= efx_piobuf_size && !xmit_more &&
efx_nic_may_tx_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
@@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
- /* There could be packets left on the partner queue if those
- * SKBs had skb->xmit_more set. If we do not push those they
+ /* There could be packets left on the partner queue if
+ * xmit_more was set. If we do not push those they
* could be left for a long time and cause a netdev watchdog.
*/
if (txq2->xmit_more_available)
@@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
efx_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = xmit_more;
}
if (segments) {
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index bb6d5fb73035..51a7b48db4bc 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev)
ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
/* if the mac address is invalid, use random mac address */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 37d5e6fe7473..085b700a4994 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -143,6 +143,11 @@
#define XGMAC_RSF BIT(5)
#define XGMAC_RTC GENMASK(1, 0)
#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x)))
+#define XGMAC_RFD GENMASK(31, 17)
+#define XGMAC_RFD_SHIFT 17
+#define XGMAC_RFA GENMASK(15, 1)
+#define XGMAC_RFA_SHIFT 1
#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
#define XGMAC_RXOIE BIT(16)
#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 2ba712b48a89..e79037f511e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -147,6 +147,52 @@ static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
value &= ~XGMAC_RQS;
value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+ if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
+ u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ unsigned int rfd, rfa;
+
+ value |= XGMAC_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (fifosz) {
+ case 4096:
+ /* This violates the above formula because of the FIFO size
+ * limit, so an overflow may still occur despite this setting.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ case 8192:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x0a; /* Full-6K */
+ break;
+
+ case 16384:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x12; /* Full-10K */
+ break;
+
+ default:
+ rfd = 0x06; /* Full-4K */
+ rfa = 0x1e; /* Full-16K */
+ break;
+ }
+
+ flow &= ~XGMAC_RFD;
+ flow |= rfd << XGMAC_RFD_SHIFT;
+
+ flow &= ~XGMAC_RFA;
+ flow |= rfa << XGMAC_RFA_SHIFT;
+
+ writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ }
+
writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
/* Enable MTL RX overflow */
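
Editor's note: the dwxgmac2 hunk above programs the new RFA/RFD fields with open-coded mask-and-shift updates against the XGMAC_RFA/XGMAC_RFD GENMASK definitions added in dwxgmac2.h. Shown below, purely as a sketch and not the driver's code, is the same field insertion expressed with FIELD_PREP() from <linux/bitfield.h>; the helper name is hypothetical and the XGMAC_* macros are assumed from dwxgmac2.h.

#include <linux/bitfield.h>
#include <linux/io.h>

/* Sketch only: equivalent RFA/RFD update using FIELD_PREP(). */
static void xgmac_set_rx_flow_thresholds(void __iomem *ioaddr, u32 channel,
					 u32 rfd, u32 rfa)
{
	u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));

	flow &= ~(XGMAC_RFD | XGMAC_RFA);
	flow |= FIELD_PREP(XGMAC_RFD, rfd) | FIELD_PREP(XGMAC_RFA, rfa);
	writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
}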
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48712437d0da..5678b869cbff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
-static int flow_ctrl = FLOW_OFF;
+static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
@@ -4262,7 +4262,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wol_irq = res->wol_irq;
priv->lpi_irq = res->lpi_irq;
- if (res->mac)
+ if (!IS_ERR_OR_NULL(res->mac))
memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
dev_set_drvdata(device, priv->dev);
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 644e42c181ee..01ea0d6f8819 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 590172818b92..96b883f965f6 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 99d86e39ff54..bf6c1c6779ff 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
smp_wmb();
ring->cur = cur_index + 1;
- if (!pkt_info->skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xlgmac_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 8b21b40a9fe5..afbdc9744230 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -20,7 +20,6 @@ config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
select TI_DAVINCI_MDIO
- select TI_DAVINCI_CPDMA
select PHYLIB
---help---
This driver supports TI's DaVinci Ethernet.
@@ -38,16 +37,6 @@ config TI_DAVINCI_MDIO
To compile this driver as a module, choose M here: the module
will be called davinci_mdio. This is recommended.
-config TI_DAVINCI_CPDMA
- tristate "TI DaVinci CPDMA Support"
- depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
- select GENERIC_ALLOCATOR
- ---help---
- This driver supports TI's DaVinci CPDMA dma engine.
-
- To compile this driver as a module, choose M here: the module
- will be called davinci_cpdma. This is recommended.
-
config TI_CPSW_PHY_SEL
bool "TI CPSW Phy mode Selection (DEPRECATED)"
default n
@@ -55,17 +44,10 @@ config TI_CPSW_PHY_SEL
This driver supports configuring of the phy mode connected to
the CPSW. DEPRECATED: use PHY_TI_GMII_SEL.
-config TI_CPSW_ALE
- tristate "TI CPSW ALE Support"
- ---help---
- This driver supports TI's CPSW ALE module.
-
config TI_CPSW
tristate "TI CPSW Switch Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
- select TI_DAVINCI_CPDMA
select TI_DAVINCI_MDIO
- select TI_CPSW_ALE
select MFD_SYSCON
select REGMAP
---help---
@@ -94,7 +76,6 @@ config TI_CPTS_MOD
config TI_KEYSTONE_NETCP
tristate "TI Keystone NETCP Core Support"
- select TI_CPSW_ALE
select TI_DAVINCI_MDIO
depends on OF
depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 0be551de821c..c3f53a40b48f 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,16 +8,15 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TLAN) += tlan.o
obj-$(CONFIG_CPMAC) += cpmac.o
-obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
+ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
-obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
-obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o
obj-$(CONFIG_TI_CPTS_MOD) += cpts.o
obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
-ti_cpsw-y := cpsw.o
+ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
-keystone_netcp-y := netcp_core.o
+keystone_netcp-y := netcp_core.o cpsw_ale.o
obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index e2d47b24a869..3a655a4dc10e 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1,19 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2006, 2007 Eugene Konev
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
index 38d1cc557c11..bfa81bbfce3f 100644
--- a/drivers/net/ethernet/ti/cpsw-common.c
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -1,14 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index fec275e2208d..48e0924259f5 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -1,17 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments Ethernet Switch Driver
*
* Copyright (C) 2013 Texas Instruments
*
* Module Author: Mugunthan V N <mugunthanvnm@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a591583d120e..b18eeb05b993 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments Ethernet Switch Driver
*
* Copyright (C) 2012 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -44,138 +37,13 @@
#include "cpsw.h"
#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
#include "cpts.h"
#include "davinci_cpdma.h"
#include <net/pkt_sched.h>
-#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
- NETIF_MSG_DRV | NETIF_MSG_LINK | \
- NETIF_MSG_IFUP | NETIF_MSG_INTR | \
- NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
- NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_RX_STATUS)
-
-#define cpsw_info(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_info(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define cpsw_err(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_err(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define cpsw_dbg(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_dbg(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define cpsw_notice(priv, type, format, ...) \
-do { \
- if (netif_msg_##type(priv) && net_ratelimit()) \
- dev_notice(priv->dev, format, ## __VA_ARGS__); \
-} while (0)
-
-#define ALE_ALL_PORTS 0x7
-
-#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
-#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
-#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
-
-#define CPSW_VERSION_1 0x19010a
-#define CPSW_VERSION_2 0x19010c
-#define CPSW_VERSION_3 0x19010f
-#define CPSW_VERSION_4 0x190112
-
-#define HOST_PORT_NUM 0
-#define CPSW_ALE_PORTS_NUM 3
-#define SLIVER_SIZE 0x40
-
-#define CPSW1_HOST_PORT_OFFSET 0x028
-#define CPSW1_SLAVE_OFFSET 0x050
-#define CPSW1_SLAVE_SIZE 0x040
-#define CPSW1_CPDMA_OFFSET 0x100
-#define CPSW1_STATERAM_OFFSET 0x200
-#define CPSW1_HW_STATS 0x400
-#define CPSW1_CPTS_OFFSET 0x500
-#define CPSW1_ALE_OFFSET 0x600
-#define CPSW1_SLIVER_OFFSET 0x700
-
-#define CPSW2_HOST_PORT_OFFSET 0x108
-#define CPSW2_SLAVE_OFFSET 0x200
-#define CPSW2_SLAVE_SIZE 0x100
-#define CPSW2_CPDMA_OFFSET 0x800
-#define CPSW2_HW_STATS 0x900
-#define CPSW2_STATERAM_OFFSET 0xa00
-#define CPSW2_CPTS_OFFSET 0xc00
-#define CPSW2_ALE_OFFSET 0xd00
-#define CPSW2_SLIVER_OFFSET 0xd80
-#define CPSW2_BD_OFFSET 0x2000
-
-#define CPDMA_RXTHRESH 0x0c0
-#define CPDMA_RXFREE 0x0e0
-#define CPDMA_TXHDP 0x00
-#define CPDMA_RXHDP 0x20
-#define CPDMA_TXCP 0x40
-#define CPDMA_RXCP 0x60
-
-#define CPSW_POLL_WEIGHT 64
-#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
-#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
-#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
- ETH_FCS_LEN +\
- CPSW_RX_VLAN_ENCAP_HDR_SIZE)
-
-#define RX_PRIORITY_MAPPING 0x76543210
-#define TX_PRIORITY_MAPPING 0x33221100
-#define CPDMA_TX_PRIORITY_MAP 0x76543210
-
-#define CPSW_VLAN_AWARE BIT(1)
-#define CPSW_RX_VLAN_ENCAP BIT(2)
-#define CPSW_ALE_VLAN_AWARE 1
-
-#define CPSW_FIFO_NORMAL_MODE (0 << 16)
-#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
-#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
-
-#define CPSW_INTPACEEN (0x3f << 16)
-#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
-#define CPSW_CMINTMAX_CNT 63
-#define CPSW_CMINTMIN_CNT 2
-#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
-#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
-
-#define cpsw_slave_index(cpsw, priv) \
- ((cpsw->data.dual_emac) ? priv->emac_port : \
- cpsw->data.active_slave)
-#define IRQ_NUM 2
-#define CPSW_MAX_QUEUES 8
-#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
-#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
-#define CPSW_FIFO_SHAPE_EN_SHIFT 16
-#define CPSW_FIFO_RATE_EN_SHIFT 20
-#define CPSW_TC_NUM 4
-#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
-#define CPSW_PCT_MASK 0x7f
-
-#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
-#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
-#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16
-#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8
-#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0)
-enum {
- CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
- CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
- CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
- CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
-};
-
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -192,369 +60,6 @@ static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
-struct cpsw_wr_regs {
- u32 id_ver;
- u32 soft_reset;
- u32 control;
- u32 int_control;
- u32 rx_thresh_en;
- u32 rx_en;
- u32 tx_en;
- u32 misc_en;
- u32 mem_allign1[8];
- u32 rx_thresh_stat;
- u32 rx_stat;
- u32 tx_stat;
- u32 misc_stat;
- u32 mem_allign2[8];
- u32 rx_imax;
- u32 tx_imax;
-
-};
-
-struct cpsw_ss_regs {
- u32 id_ver;
- u32 control;
- u32 soft_reset;
- u32 stat_port_en;
- u32 ptype;
- u32 soft_idle;
- u32 thru_rate;
- u32 gap_thresh;
- u32 tx_start_wds;
- u32 flow_control;
- u32 vlan_ltype;
- u32 ts_ltype;
- u32 dlr_ltype;
-};
-
-/* CPSW_PORT_V1 */
-#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
-#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
-#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
-#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
-#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
-#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
-#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
-#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
-
-/* CPSW_PORT_V2 */
-#define CPSW2_CONTROL 0x00 /* Control Register */
-#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
-#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
-#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
-#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
-#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
-#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
-
-/* CPSW_PORT_V1 and V2 */
-#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
-#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
-#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
-
-/* CPSW_PORT_V2 only */
-#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
-#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
-
-/* Bit definitions for the CPSW2_CONTROL register */
-#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
-#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
-#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
-#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
-#define TS_107 BIT(15) /* Tyme Sync Dest IP Address 107 */
-#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
-#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
-#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
-#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
-#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
-#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
-#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
-#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
-#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
-#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
-#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
-#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
-
-#define CTRL_V2_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
-
-#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
-#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
-
-
-#define CTRL_V3_TS_BITS \
- (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN | VLAN_LTYPE1_EN)
-
-#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
-#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
-
-/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
-#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
-#define TS_SEQ_ID_OFFSET_MASK (0x3f)
-#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
-#define TS_MSG_TYPE_EN_MASK (0xffff)
-
-/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
-#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
-
-/* Bit definitions for the CPSW1_TS_CTL register */
-#define CPSW_V1_TS_RX_EN BIT(0)
-#define CPSW_V1_TS_TX_EN BIT(4)
-#define CPSW_V1_MSG_TYPE_OFS 16
-
-/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
-#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
-
-#define CPSW_MAX_BLKS_TX 15
-#define CPSW_MAX_BLKS_TX_SHIFT 4
-#define CPSW_MAX_BLKS_RX 5
-
-struct cpsw_host_regs {
- u32 max_blks;
- u32 blk_cnt;
- u32 tx_in_ctl;
- u32 port_vlan;
- u32 tx_pri_map;
- u32 cpdma_tx_pri_map;
- u32 cpdma_rx_chan_map;
-};
-
-struct cpsw_sliver_regs {
- u32 id_ver;
- u32 mac_control;
- u32 mac_status;
- u32 soft_reset;
- u32 rx_maxlen;
- u32 __reserved_0;
- u32 rx_pause;
- u32 tx_pause;
- u32 __reserved_1;
- u32 rx_pri_map;
-};
-
-struct cpsw_hw_stats {
- u32 rxgoodframes;
- u32 rxbroadcastframes;
- u32 rxmulticastframes;
- u32 rxpauseframes;
- u32 rxcrcerrors;
- u32 rxaligncodeerrors;
- u32 rxoversizedframes;
- u32 rxjabberframes;
- u32 rxundersizedframes;
- u32 rxfragments;
- u32 __pad_0[2];
- u32 rxoctets;
- u32 txgoodframes;
- u32 txbroadcastframes;
- u32 txmulticastframes;
- u32 txpauseframes;
- u32 txdeferredframes;
- u32 txcollisionframes;
- u32 txsinglecollframes;
- u32 txmultcollframes;
- u32 txexcessivecollisions;
- u32 txlatecollisions;
- u32 txunderrun;
- u32 txcarriersenseerrors;
- u32 txoctets;
- u32 octetframes64;
- u32 octetframes65t127;
- u32 octetframes128t255;
- u32 octetframes256t511;
- u32 octetframes512t1023;
- u32 octetframes1024tup;
- u32 netoctets;
- u32 rxsofoverruns;
- u32 rxmofoverruns;
- u32 rxdmaoverruns;
-};
-
-struct cpsw_slave_data {
- struct device_node *phy_node;
- char phy_id[MII_BUS_ID_SIZE];
- int phy_if;
- u8 mac_addr[ETH_ALEN];
- u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
- struct phy *ifphy;
-};
-
-struct cpsw_platform_data {
- struct cpsw_slave_data *slave_data;
- u32 ss_reg_ofs; /* Subsystem control register offset */
- u32 channels; /* number of cpdma channels (symmetric) */
- u32 slaves; /* number of slave cpgmac ports */
- u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
- u32 bd_ram_size; /*buffer descriptor ram size */
- u32 mac_control; /* Mac control register */
- u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
- bool dual_emac; /* Enable Dual EMAC mode */
-};
-
-struct cpsw_slave {
- void __iomem *regs;
- struct cpsw_sliver_regs __iomem *sliver;
- int slave_num;
- u32 mac_control;
- struct cpsw_slave_data *data;
- struct phy_device *phy;
- struct net_device *ndev;
- u32 port_vlan;
-};
-
-static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
-{
- return readl_relaxed(slave->regs + offset);
-}
-
-static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
-{
- writel_relaxed(val, slave->regs + offset);
-}
-
-struct cpsw_vector {
- struct cpdma_chan *ch;
- int budget;
-};
-
-struct cpsw_common {
- struct device *dev;
- struct cpsw_platform_data data;
- struct napi_struct napi_rx;
- struct napi_struct napi_tx;
- struct cpsw_ss_regs __iomem *regs;
- struct cpsw_wr_regs __iomem *wr_regs;
- u8 __iomem *hw_stats;
- struct cpsw_host_regs __iomem *host_port_regs;
- u32 version;
- u32 coal_intvl;
- u32 bus_freq_mhz;
- int rx_packet_max;
- struct cpsw_slave *slaves;
- struct cpdma_ctlr *dma;
- struct cpsw_vector txv[CPSW_MAX_QUEUES];
- struct cpsw_vector rxv[CPSW_MAX_QUEUES];
- struct cpsw_ale *ale;
- bool quirk_irq;
- bool rx_irq_disabled;
- bool tx_irq_disabled;
- u32 irqs_table[IRQ_NUM];
- struct cpts *cpts;
- int rx_ch_num, tx_ch_num;
- int speed;
- int usage_count;
-};
-
-struct cpsw_priv {
- struct net_device *ndev;
- struct device *dev;
- u32 msg_enable;
- u8 mac_addr[ETH_ALEN];
- bool rx_pause;
- bool tx_pause;
- bool mqprio_hw;
- int fifo_bw[CPSW_TC_NUM];
- int shp_cfg_speed;
- int tx_ts_enabled;
- int rx_ts_enabled;
- u32 emac_port;
- struct cpsw_common *cpsw;
-};
-
-struct cpsw_stats {
- char stat_string[ETH_GSTRING_LEN];
- int type;
- int sizeof_stat;
- int stat_offset;
-};
-
-enum {
- CPSW_STATS,
- CPDMA_RX_STATS,
- CPDMA_TX_STATS,
-};
-
-#define CPSW_STAT(m) CPSW_STATS, \
- FIELD_SIZEOF(struct cpsw_hw_stats, m), \
- offsetof(struct cpsw_hw_stats, m)
-#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
- FIELD_SIZEOF(struct cpdma_chan_stats, m), \
- offsetof(struct cpdma_chan_stats, m)
-#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
- FIELD_SIZEOF(struct cpdma_chan_stats, m), \
- offsetof(struct cpdma_chan_stats, m)
-
-static const struct cpsw_stats cpsw_gstrings_stats[] = {
- { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
- { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
- { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
- { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
- { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
- { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
- { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
- { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
- { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
- { "Rx Fragments", CPSW_STAT(rxfragments) },
- { "Rx Octets", CPSW_STAT(rxoctets) },
- { "Good Tx Frames", CPSW_STAT(txgoodframes) },
- { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
- { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
- { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
- { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
- { "Collisions", CPSW_STAT(txcollisionframes) },
- { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
- { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
- { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
- { "Late Collisions", CPSW_STAT(txlatecollisions) },
- { "Tx Underrun", CPSW_STAT(txunderrun) },
- { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
- { "Tx Octets", CPSW_STAT(txoctets) },
- { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
- { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
- { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
- { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
- { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
- { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
- { "Net Octets", CPSW_STAT(netoctets) },
- { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
- { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
- { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
-};
-
-static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
- { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
- { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
- { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
- { "misqueued", CPDMA_RX_STAT(misqueued) },
- { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
- { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
- { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
- { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
- { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
- { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
- { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
- { "requeue", CPDMA_RX_STAT(requeue) },
- { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
-};
-
-#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
-#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
-
-#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
-#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...) \
do { \
struct cpsw_slave *slave; \
@@ -572,11 +77,6 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid);
-static inline int cpsw_get_slave_port(u32 slave_num)
-{
- return slave_num + 1;
-}
-
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -653,13 +153,6 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-struct addr_sync_ctx {
- struct net_device *ndev;
- const u8 *addr; /* address to be synched */
- int consumed; /* number of address instances */
- int flush; /* flush flag */
-};
-
/**
* cpsw_set_mc - adds multicast entry to the table if it's not added or deletes
* if it's not deleted
@@ -800,12 +293,17 @@ static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_port = -1;
+
+ if (cpsw->data.dual_emac)
+ slave_port = priv->emac_port + 1;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
cpsw_set_promiscious(ndev, true);
- cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
return;
} else {
/* Disable promiscuous mode */
@@ -813,14 +311,15 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
}
/* Restore allmulti on vlans if necessary */
- cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, slave_port);
/* add/remove mcast address either for real netdev or for vlan */
__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
cpsw_del_mc_addr);
}
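/*
 * Illustrative sketch, not part of the patch: how the reworked rx-mode path
 * above scopes ALLMULTI.  cpsw_allmulti_port() is a hypothetical helper that
 * only restates the hunk's logic -- in dual EMAC mode each net_device maps to
 * one slave port (emac_port + 1, the host port being 0), while -1 keeps the
 * old "touch every VLAN entry" behaviour for switch mode.
 */
static int cpsw_allmulti_port(struct cpsw_priv *priv)
{
	/* -1 means "no port filter" to cpsw_ale_set_allmulti() */
	if (!priv->cpsw->data.dual_emac)
		return -1;

	/* slave ports start at 1 */
	return priv->emac_port + 1;
}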
-static void cpsw_intr_enable(struct cpsw_common *cpsw)
+void cpsw_intr_enable(struct cpsw_common *cpsw)
{
writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
@@ -829,7 +328,7 @@ static void cpsw_intr_enable(struct cpsw_common *cpsw)
return;
}
-static void cpsw_intr_disable(struct cpsw_common *cpsw)
+void cpsw_intr_disable(struct cpsw_common *cpsw)
{
writel_relaxed(0, &cpsw->wr_regs->tx_en);
writel_relaxed(0, &cpsw->wr_regs->rx_en);
@@ -838,7 +337,7 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw)
return;
}
-static void cpsw_tx_handler(void *token, int len, int status)
+void cpsw_tx_handler(void *token, int len, int status)
{
struct netdev_queue *txq;
struct sk_buff *skb = token;
@@ -970,11 +469,9 @@ requeue:
dev_kfree_skb_any(new_skb);
}
-static void cpsw_split_res(struct net_device *ndev)
+void cpsw_split_res(struct cpsw_common *cpsw)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
u32 consumed_rate = 0, bigest_rate = 0;
- struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_vector *txv = cpsw->txv;
int i, ch_weight, rlim_ch_num = 0;
int budget, bigest_rate_ch = 0;
@@ -1254,29 +751,32 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
slave_port = cpsw_get_slave_port(slave->slave_num);
if (phy->link) {
- mac_control = cpsw->data.mac_control;
-
- /* enable forwarding */
- cpsw_ale_control_set(cpsw->ale, slave_port,
- ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ mac_control = CPSW_SL_CTL_GMII_EN;
if (phy->speed == 1000)
- mac_control |= BIT(7); /* GIGABITEN */
+ mac_control |= CPSW_SL_CTL_GIG;
if (phy->duplex)
- mac_control |= BIT(0); /* FULLDUPLEXEN */
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
- mac_control |= BIT(15);
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
/* in band mode only works in 10Mbps RGMII mode */
else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
- mac_control |= BIT(18); /* In Band mode */
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
if (priv->rx_pause)
- mac_control |= BIT(3);
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
if (priv->tx_pause)
- mac_control |= BIT(4);
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
*link = true;
@@ -1290,12 +790,14 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* disable forwarding */
cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
}
- if (mac_control != slave->mac_control) {
+ if (mac_control != slave->mac_control)
phy_print_status(phy);
- writel_relaxed(mac_control, &slave->sliver->mac_control);
- }
slave->mac_control = mac_control;
}
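/*
 * Not part of the patch: the CPSW_SL_CTL_* names introduced above replace the
 * old magic BIT() values one for one, so (assuming the new cpsw_sl.h keeps the
 * same register layout, which is not shown in this hunk) they presumably
 * encode:
 *
 *   CPSW_SL_CTL_FULLDUPLEX   BIT(0)   full duplex ("FULLDUPLEXEN")
 *   CPSW_SL_CTL_RX_FLOW_EN   BIT(3)   honour rx pause frames
 *   CPSW_SL_CTL_TX_FLOW_EN   BIT(4)   generate tx pause frames
 *   CPSW_SL_CTL_GIG          BIT(7)   gigabit mode ("GIGABITEN")
 *   CPSW_SL_CTL_IFCTL_A      BIT(15)  speed_in for 100Mbps RMII
 *   CPSW_SL_CTL_EXT_EN       BIT(18)  in-band mode for 10Mbps RGMII
 *
 * CPSW_SL_CTL_GMII_EN has no BIT() counterpart here because the old path
 * seeded mac_control from cpsw->data.mac_control instead.
 */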
@@ -1348,7 +850,7 @@ static void cpsw_adjust_link(struct net_device *ndev)
if (link) {
if (cpsw_need_resplit(cpsw))
- cpsw_split_res(ndev);
+ cpsw_split_res(cpsw);
netif_carrier_on(ndev);
if (netif_running(ndev))
@@ -1359,167 +861,6 @@ static void cpsw_adjust_link(struct net_device *ndev)
}
}
-static int cpsw_get_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *coal)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- coal->rx_coalesce_usecs = cpsw->coal_intvl;
- return 0;
-}
-
-static int cpsw_set_coalesce(struct net_device *ndev,
- struct ethtool_coalesce *coal)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- u32 int_ctrl;
- u32 num_interrupts = 0;
- u32 prescale = 0;
- u32 addnl_dvdr = 1;
- u32 coal_intvl = 0;
- struct cpsw_common *cpsw = priv->cpsw;
-
- coal_intvl = coal->rx_coalesce_usecs;
-
- int_ctrl = readl(&cpsw->wr_regs->int_control);
- prescale = cpsw->bus_freq_mhz * 4;
-
- if (!coal->rx_coalesce_usecs) {
- int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
- goto update_return;
- }
-
- if (coal_intvl < CPSW_CMINTMIN_INTVL)
- coal_intvl = CPSW_CMINTMIN_INTVL;
-
- if (coal_intvl > CPSW_CMINTMAX_INTVL) {
- /* Interrupt pacer works with 4us Pulse, we can
- * throttle further by dilating the 4us pulse.
- */
- addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
-
- if (addnl_dvdr > 1) {
- prescale *= addnl_dvdr;
- if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
- coal_intvl = (CPSW_CMINTMAX_INTVL
- * addnl_dvdr);
- } else {
- addnl_dvdr = 1;
- coal_intvl = CPSW_CMINTMAX_INTVL;
- }
- }
-
- num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
- writel(num_interrupts, &cpsw->wr_regs->rx_imax);
- writel(num_interrupts, &cpsw->wr_regs->tx_imax);
-
- int_ctrl |= CPSW_INTPACEEN;
- int_ctrl &= (~CPSW_INTPRESCALE_MASK);
- int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
-
-update_return:
- writel(int_ctrl, &cpsw->wr_regs->int_control);
-
- cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
- cpsw->coal_intvl = coal_intvl;
-
- return 0;
-}
-
-static int cpsw_get_sset_count(struct net_device *ndev, int sset)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- switch (sset) {
- case ETH_SS_STATS:
- return (CPSW_STATS_COMMON_LEN +
- (cpsw->rx_ch_num + cpsw->tx_ch_num) *
- CPSW_STATS_CH_LEN);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
-{
- int ch_stats_len;
- int line;
- int i;
-
- ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
- for (i = 0; i < ch_stats_len; i++) {
- line = i % CPSW_STATS_CH_LEN;
- snprintf(*p, ETH_GSTRING_LEN,
- "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
- (long)(i / CPSW_STATS_CH_LEN),
- cpsw_gstrings_ch_stats[line].stat_string);
- *p += ETH_GSTRING_LEN;
- }
-}
-
-static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- u8 *p = data;
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
- memcpy(p, cpsw_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
-
- cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
- cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
- break;
- }
-}
-
-static void cpsw_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- u8 *p;
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- struct cpdma_chan_stats ch_stats;
- int i, l, ch;
-
- /* Collect Davinci CPDMA stats for Rx and Tx Channel */
- for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
- data[l] = readl(cpsw->hw_stats +
- cpsw_gstrings_stats[l].stat_offset);
-
- for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
- cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
- for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
- p = (u8 *)&ch_stats +
- cpsw_gstrings_ch_stats[i].stat_offset;
- data[l] = *(u32 *)p;
- }
- }
-
- for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
- cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
- for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
- p = (u8 *)&ch_stats +
- cpsw_gstrings_ch_stats[i].stat_offset;
- data[l] = *(u32 *)p;
- }
- }
-}
-
-static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
- struct sk_buff *skb,
- struct cpdma_chan *txch)
-{
- struct cpsw_common *cpsw = priv->cpsw;
-
- skb_tx_timestamp(skb);
- return cpdma_chan_submit(txch, skb, skb->data, skb->len,
- priv->emac_port + cpsw->data.dual_emac);
-}
-
static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
@@ -1542,24 +883,18 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}
-static void soft_reset_slave(struct cpsw_slave *slave)
-{
- char name[32];
-
- snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
- soft_reset(name, &slave->sliver->soft_reset);
-}
-
static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
u32 slave_port;
struct phy_device *phy;
struct cpsw_common *cpsw = priv->cpsw;
- soft_reset_slave(slave);
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
/* setup priority mapping */
- writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
switch (cpsw->version) {
case CPSW_VERSION_1:
@@ -1585,7 +920,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
}
/* setup max packet size, and mac address */
- writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
cpsw_set_slave_mac(slave, priv);
slave->mac_control = 0; /* no link yet */
@@ -1696,7 +1032,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
}
}
-static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
struct sk_buff *skb;
@@ -1748,7 +1084,8 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
slave->phy = NULL;
cpsw_ale_control_set(cpsw->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
- soft_reset_slave(slave);
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
}
static int cpsw_tc_to_fifo(int tc, int num_tc)
@@ -2114,7 +1451,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
for_each_slave(priv, cpsw_slave_stop, cpsw);
if (cpsw_need_resplit(cpsw))
- cpsw_split_res(ndev);
+ cpsw_split_res(cpsw);
cpsw->usage_count--;
pm_runtime_put_sync(cpsw->dev);
@@ -2147,7 +1484,9 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
txch = cpsw->txv[q_idx].ch;
txq = netdev_get_tx_queue(ndev, q_idx);
- ret = cpsw_tx_packet_submit(priv, skb, txch);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port + cpsw->data.dual_emac);
if (unlikely(ret != 0)) {
cpsw_err(priv, tx_err, "desc submit failed\n");
goto fail;
@@ -2418,18 +1757,6 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void cpsw_ndo_poll_controller(struct net_device *ndev)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- cpsw_intr_disable(cpsw);
- cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
- cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
- cpsw_intr_enable(cpsw);
-}
-#endif
-
static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
@@ -2601,7 +1928,7 @@ static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
}
- cpsw_split_res(ndev);
+ cpsw_split_res(cpsw);
return ret;
}
@@ -2677,6 +2004,18 @@ static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
static const struct net_device_ops cpsw_netdev_ops = {
.ndo_open = cpsw_ndo_open,
.ndo_stop = cpsw_ndo_stop,
@@ -2695,25 +2034,6 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_setup_tc = cpsw_ndo_setup_tc,
};
-static int cpsw_get_regs_len(struct net_device *ndev)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
-}
-
-static void cpsw_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *p)
-{
- u32 *reg = p;
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- /* update CPSW IP version */
- regs->version = cpsw->version;
-
- cpsw_ale_dump(cpsw->ale, reg);
-}
-
static void cpsw_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -2725,119 +2045,6 @@ static void cpsw_get_drvinfo(struct net_device *ndev,
strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
-static u32 cpsw_get_msglevel(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- return priv->msg_enable;
-}
-
-static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- priv->msg_enable = value;
-}
-
-#if IS_ENABLED(CONFIG_TI_CPTS)
-static int cpsw_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = cpsw->cpts->phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
- return 0;
-}
-#else
-static int cpsw_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
-{
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
- info->tx_types = 0;
- info->rx_filters = 0;
- return 0;
-}
-#endif
-
-static int cpsw_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *ecmd)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (!cpsw->slaves[slave_no].phy)
- return -EOPNOTSUPP;
-
- phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
- return 0;
-}
-
-static int cpsw_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *ecmd)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
- ecmd);
- else
- return -EOPNOTSUPP;
-}
-
-static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (cpsw->slaves[slave_no].phy)
- phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
-}
-
-static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
- else
- return -EOPNOTSUPP;
-}
-
-static void cpsw_get_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pause)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
-
- pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = priv->rx_pause ? true : false;
- pause->tx_pause = priv->tx_pause ? true : false;
-}
-
static int cpsw_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *pause)
{
@@ -2851,316 +2058,10 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
return 0;
}
-static int cpsw_ethtool_op_begin(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ret;
-
- ret = pm_runtime_get_sync(cpsw->dev);
- if (ret < 0) {
- cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
- pm_runtime_put_noidle(cpsw->dev);
- }
-
- return ret;
-}
-
-static void cpsw_ethtool_op_complete(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- int ret;
-
- ret = pm_runtime_put(priv->cpsw->dev);
- if (ret < 0)
- cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
-}
-
-static void cpsw_get_channels(struct net_device *ndev,
- struct ethtool_channels *ch)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
-
- ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
- ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
- ch->max_combined = 0;
- ch->max_other = 0;
- ch->other_count = 0;
- ch->rx_count = cpsw->rx_ch_num;
- ch->tx_count = cpsw->tx_ch_num;
- ch->combined_count = 0;
-}
-
-static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
- struct ethtool_channels *ch)
-{
- if (cpsw->quirk_irq) {
- dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
- return -EOPNOTSUPP;
- }
-
- if (ch->combined_count)
- return -EINVAL;
-
- /* verify we have at least one channel in each direction */
- if (!ch->rx_count || !ch->tx_count)
- return -EINVAL;
-
- if (ch->rx_count > cpsw->data.channels ||
- ch->tx_count > cpsw->data.channels)
- return -EINVAL;
-
- return 0;
-}
-
-static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
-{
- struct cpsw_common *cpsw = priv->cpsw;
- void (*handler)(void *, int, int);
- struct netdev_queue *queue;
- struct cpsw_vector *vec;
- int ret, *ch, vch;
-
- if (rx) {
- ch = &cpsw->rx_ch_num;
- vec = cpsw->rxv;
- handler = cpsw_rx_handler;
- } else {
- ch = &cpsw->tx_ch_num;
- vec = cpsw->txv;
- handler = cpsw_tx_handler;
- }
-
- while (*ch < ch_num) {
- vch = rx ? *ch : 7 - *ch;
- vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
- queue = netdev_get_tx_queue(priv->ndev, *ch);
- queue->tx_maxrate = 0;
-
- if (IS_ERR(vec[*ch].ch))
- return PTR_ERR(vec[*ch].ch);
-
- if (!vec[*ch].ch)
- return -EINVAL;
-
- cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
- (rx ? "rx" : "tx"));
- (*ch)++;
- }
-
- while (*ch > ch_num) {
- (*ch)--;
-
- ret = cpdma_chan_destroy(vec[*ch].ch);
- if (ret)
- return ret;
-
- cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
- (rx ? "rx" : "tx"));
- }
-
- return 0;
-}
-
-static int cpsw_update_channels(struct cpsw_priv *priv,
- struct ethtool_channels *ch)
-{
- int ret;
-
- ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
- if (ret)
- return ret;
-
- ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void cpsw_suspend_data_pass(struct net_device *ndev)
-{
- struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- struct cpsw_slave *slave;
- int i;
-
- /* Disable NAPI scheduling */
- cpsw_intr_disable(cpsw);
-
- /* Stop all transmit queues for every network device.
- * Disable re-using rx descriptors with dormant_on.
- */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
-
- netif_tx_stop_all_queues(slave->ndev);
- netif_dormant_on(slave->ndev);
- }
-
- /* Handle rest of tx packets and stop cpdma channels */
- cpdma_ctlr_stop(cpsw->dma);
-}
-
-static int cpsw_resume_data_pass(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
-
- /* Allow rx packets handling */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_dormant_off(slave->ndev);
-
- /* After this receive is started */
- if (cpsw->usage_count) {
- ret = cpsw_fill_rx_channels(priv);
- if (ret)
- return ret;
-
- cpdma_ctlr_start(cpsw->dma);
- cpsw_intr_enable(cpsw);
- }
-
- /* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
- if (slave->ndev && netif_running(slave->ndev))
- netif_tx_start_all_queues(slave->ndev);
-
- return 0;
-}
-
static int cpsw_set_channels(struct net_device *ndev,
struct ethtool_channels *chs)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
-
- ret = cpsw_check_ch_settings(cpsw, chs);
- if (ret < 0)
- return ret;
-
- cpsw_suspend_data_pass(ndev);
- ret = cpsw_update_channels(priv, chs);
- if (ret)
- goto err;
-
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
-
- /* Inform stack about new count of queues */
- ret = netif_set_real_num_tx_queues(slave->ndev,
- cpsw->tx_ch_num);
- if (ret) {
- dev_err(priv->dev, "cannot set real number of tx queues\n");
- goto err;
- }
-
- ret = netif_set_real_num_rx_queues(slave->ndev,
- cpsw->rx_ch_num);
- if (ret) {
- dev_err(priv->dev, "cannot set real number of rx queues\n");
- goto err;
- }
- }
-
- if (cpsw->usage_count)
- cpsw_split_res(ndev);
-
- ret = cpsw_resume_data_pass(ndev);
- if (!ret)
- return 0;
-err:
- dev_err(priv->dev, "cannot update channels number, closing device\n");
- dev_close(ndev);
- return ret;
-}
-
-static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
-static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
- else
- return -EOPNOTSUPP;
-}
-
-static int cpsw_nway_reset(struct net_device *ndev)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int slave_no = cpsw_slave_index(cpsw, priv);
-
- if (cpsw->slaves[slave_no].phy)
- return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
- else
- return -EOPNOTSUPP;
-}
-
-static void cpsw_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
-
- /* not supported */
- ering->tx_max_pending = 0;
- ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
- ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
- ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
-}
-
-static int cpsw_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
-{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int ret;
-
- /* ignore ering->tx_pending - only rx_pending adjustment is supported */
-
- if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
- ering->rx_pending < CPSW_MAX_QUEUES ||
- ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
- return -EINVAL;
-
- if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
- return 0;
-
- cpsw_suspend_data_pass(ndev);
-
- cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
-
- if (cpsw->usage_count)
- cpdma_chan_split_pool(cpsw->dma);
-
- ret = cpsw_resume_data_pass(ndev);
- if (!ret)
- return 0;
-
- dev_err(&ndev->dev, "cannot set ring params, closing device\n");
- dev_close(ndev);
- return ret;
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
}
static const struct ethtool_ops cpsw_ethtool_ops = {
@@ -3193,19 +2094,6 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_ringparam = cpsw_set_ringparam,
};
-static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
- u32 slave_reg_ofs, u32 sliver_reg_ofs)
-{
- void __iomem *regs = cpsw->regs;
- int slave_num = slave->slave_num;
- struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num;
-
- slave->data = data;
- slave->regs = regs + slave_reg_ofs;
- slave->sliver = regs + sliver_reg_ofs;
- slave->port_vlan = data->dual_emac_res_vlan;
-}
-
static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct platform_device *pdev)
{
@@ -3344,7 +2232,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
no_phy_slave:
mac_addr = of_get_mac_address(slave_node);
- if (mac_addr) {
+ if (!IS_ERR(mac_addr)) {
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
} else {
ret = ti_cm_get_macid(&pdev->dev, i,
@@ -3408,7 +2296,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
struct cpsw_priv *priv_sl2;
int ret = 0;
- ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
+ ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
if (!ndev) {
dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
@@ -3442,11 +2331,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
/* register the network device */
SET_NETDEV_DEV(ndev, cpsw->dev);
ret = register_netdev(ndev);
- if (ret) {
+ if (ret)
dev_err(cpsw->dev, "cpsw: error registering net device\n");
- free_netdev(ndev);
- ret = -ENODEV;
- }
return ret;
}
@@ -3467,63 +2353,74 @@ static const struct soc_device_attribute cpsw_soc_devices[] = {
static int cpsw_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct clk *clk;
struct cpsw_platform_data *data;
struct net_device *ndev;
struct cpsw_priv *priv;
- struct cpdma_params dma_params;
- struct cpsw_ale_params ale_params;
void __iomem *ss_regs;
- void __iomem *cpts_regs;
struct resource *res, *ss_res;
struct gpio_descs *mode;
- u32 slave_offset, sliver_offset, slave_size;
const struct soc_device_attribute *soc;
struct cpsw_common *cpsw;
- int ret = 0, i, ch;
+ int ret = 0, ch;
int irq;
- cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
if (!cpsw)
return -ENOMEM;
- cpsw->dev = &pdev->dev;
+ cpsw->dev = dev;
- ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
- if (!ndev) {
- dev_err(&pdev->dev, "error allocating net_device\n");
- return -ENOMEM;
- }
-
- platform_set_drvdata(pdev, ndev);
- priv = netdev_priv(ndev);
- priv->cpsw = cpsw;
- priv->ndev = ndev;
- priv->dev = &ndev->dev;
- priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
- cpsw->rx_packet_max = max(rx_packet_max, 128);
-
- mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
if (IS_ERR(mode)) {
ret = PTR_ERR(mode);
- dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
- goto clean_ndev_ret;
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
}
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck is not found %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ss_regs = devm_ioremap_resource(dev, ss_res);
+ if (IS_ERR(ss_regs))
+ return PTR_ERR(ss_regs);
+ cpsw->regs = ss_regs;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cpsw->wr_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cpsw->wr_regs))
+ return PTR_ERR(cpsw->wr_regs);
+
+ /* RX IRQ */
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ /* TX IRQ */
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
/*
* This may be required here for child devices.
*/
- pm_runtime_enable(&pdev->dev);
-
- /* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
+ pm_runtime_enable(dev);
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(dev);
if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_put_noidle(dev);
goto clean_runtime_disable_ret;
}
@@ -3531,170 +2428,72 @@ static int cpsw_probe(struct platform_device *pdev)
if (ret)
goto clean_dt_ret;
- data = &cpsw->data;
- cpsw->rx_ch_num = 1;
- cpsw->tx_ch_num = 1;
-
- if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
- memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
- dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
- } else {
- eth_random_addr(priv->mac_addr);
- dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
- }
-
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = 1;
- cpsw->slaves = devm_kcalloc(&pdev->dev,
+ data = &cpsw->data;
+ cpsw->slaves = devm_kcalloc(dev,
data->slaves, sizeof(struct cpsw_slave),
GFP_KERNEL);
if (!cpsw->slaves) {
ret = -ENOMEM;
goto clean_dt_ret;
}
- for (i = 0; i < data->slaves; i++)
- cpsw->slaves[i].slave_num = i;
-
- cpsw->slaves[0].ndev = ndev;
- priv->emac_port = 0;
-
- clk = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(clk)) {
- dev_err(priv->dev, "fck is not found\n");
- ret = -ENODEV;
- goto clean_dt_ret;
- }
- cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
-
- ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
- if (IS_ERR(ss_regs)) {
- ret = PTR_ERR(ss_regs);
- goto clean_dt_ret;
- }
- cpsw->regs = ss_regs;
-
- cpsw->version = readl(&cpsw->regs->id_ver);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(cpsw->wr_regs)) {
- ret = PTR_ERR(cpsw->wr_regs);
- goto clean_dt_ret;
- }
- memset(&dma_params, 0, sizeof(dma_params));
- memset(&ale_params, 0, sizeof(ale_params));
+ cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
+ cpsw->descs_pool_size = descs_pool_size;
- switch (cpsw->version) {
- case CPSW_VERSION_1:
- cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
- cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
- dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
- dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
- ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
- slave_offset = CPSW1_SLAVE_OFFSET;
- slave_size = CPSW1_SLAVE_SIZE;
- sliver_offset = CPSW1_SLIVER_OFFSET;
- dma_params.desc_mem_phys = 0;
- break;
- case CPSW_VERSION_2:
- case CPSW_VERSION_3:
- case CPSW_VERSION_4:
- cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
- cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
- dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
- dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
- ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
- slave_offset = CPSW2_SLAVE_OFFSET;
- slave_size = CPSW2_SLAVE_SIZE;
- sliver_offset = CPSW2_SLIVER_OFFSET;
- dma_params.desc_mem_phys =
- (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
- break;
- default:
- dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
- ret = -ENODEV;
- goto clean_dt_ret;
- }
- for (i = 0; i < cpsw->data.slaves; i++) {
- struct cpsw_slave *slave = &cpsw->slaves[i];
-
- cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
- slave_offset += slave_size;
- sliver_offset += SLIVER_SIZE;
- }
-
- dma_params.dev = &pdev->dev;
- dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
- dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
- dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
- dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
- dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
-
- dma_params.num_chan = data->channels;
- dma_params.has_soft_reset = true;
- dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
- dma_params.desc_mem_size = data->bd_ram_size;
- dma_params.desc_align = 16;
- dma_params.has_ext_regs = true;
- dma_params.desc_hw_addr = dma_params.desc_mem_phys;
- dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
- dma_params.descs_pool_size = descs_pool_size;
-
- cpsw->dma = cpdma_ctlr_create(&dma_params);
- if (!cpsw->dma) {
- dev_err(priv->dev, "error initializing dma\n");
- ret = -ENOMEM;
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
goto clean_dt_ret;
- }
-
- soc = soc_device_match(cpsw_soc_devices);
- if (soc)
- cpsw->quirk_irq = 1;
ch = cpsw->quirk_irq ? 0 : 7;
cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
if (IS_ERR(cpsw->txv[0].ch)) {
- dev_err(priv->dev, "error initializing tx dma channel\n");
+ dev_err(dev, "error initializing tx dma channel\n");
ret = PTR_ERR(cpsw->txv[0].ch);
- goto clean_dma_ret;
+ goto clean_cpts;
}
cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
if (IS_ERR(cpsw->rxv[0].ch)) {
- dev_err(priv->dev, "error initializing rx dma channel\n");
+ dev_err(dev, "error initializing rx dma channel\n");
ret = PTR_ERR(cpsw->rxv[0].ch);
- goto clean_dma_ret;
+ goto clean_cpts;
}
+ cpsw_split_res(cpsw);
- ale_params.dev = &pdev->dev;
- ale_params.ale_ageout = ale_ageout;
- ale_params.ale_entries = data->ale_entries;
- ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
-
- cpsw->ale = cpsw_ale_create(&ale_params);
- if (!cpsw->ale) {
- dev_err(priv->dev, "error initializing ale engine\n");
- ret = -ENODEV;
- goto clean_dma_ret;
+ /* setup netdev */
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ goto clean_cpts;
}
- cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
- if (IS_ERR(cpsw->cpts)) {
- ret = PTR_ERR(cpsw->cpts);
- goto clean_dma_ret;
- }
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = 0;
- ndev->irq = platform_get_irq(pdev, 1);
- if (ndev->irq < 0) {
- dev_err(priv->dev, "error getting irq resource\n");
- ret = ndev->irq;
- goto clean_dma_ret;
+ if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
+ memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
+ dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
+ } else {
+ eth_random_addr(priv->mac_addr);
+ dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
}
+ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+ cpsw->slaves[0].ndev = ndev;
+
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
ndev->netdev_ops = &cpsw_netdev_ops;
@@ -3705,15 +2504,14 @@ static int cpsw_probe(struct platform_device *pdev)
netif_tx_napi_add(ndev, &cpsw->napi_tx,
cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
CPSW_POLL_WEIGHT);
- cpsw_split_res(ndev);
/* register the network device */
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
ret = register_netdev(ndev);
if (ret) {
- dev_err(priv->dev, "error registering net device\n");
+ dev_err(dev, "error registering net device\n");
ret = -ENODEV;
- goto clean_dma_ret;
+ goto clean_cpts;
}
if (cpsw->data.dual_emac) {
@@ -3731,40 +2529,24 @@ static int cpsw_probe(struct platform_device *pdev)
* If anyone wants to implement support for those, make sure to
* first request and append them to irqs_table array.
*/
-
- /* RX IRQ */
- irq = platform_get_irq(pdev, 1);
- if (irq < 0) {
- ret = irq;
- goto clean_dma_ret;
- }
-
- cpsw->irqs_table[0] = irq;
- ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
- 0, dev_name(&pdev->dev), cpsw);
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
if (ret < 0) {
- dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_dma_ret;
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
}
- /* TX IRQ */
- irq = platform_get_irq(pdev, 2);
- if (irq < 0) {
- ret = irq;
- goto clean_dma_ret;
- }
- cpsw->irqs_table[1] = irq;
- ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
0, dev_name(&pdev->dev), cpsw);
if (ret < 0) {
- dev_err(priv->dev, "error attaching irq (%d)\n", ret);
- goto clean_dma_ret;
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
}
cpsw_notice(priv, probe,
"initialized device (regs %pa, irq %d, pool size %d)\n",
- &ss_res->start, ndev->irq, dma_params.descs_pool_size);
+ &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
pm_runtime_put(&pdev->dev);
@@ -3772,15 +2554,14 @@ static int cpsw_probe(struct platform_device *pdev)
clean_unregister_netdev_ret:
unregister_netdev(ndev);
-clean_dma_ret:
+clean_cpts:
+ cpts_release(cpsw->cpts);
cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
-clean_ndev_ret:
- free_netdev(priv->ndev);
return ret;
}
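/*
 * Not part of the patch: the probe error unwind after the rework, read
 * bottom-up from the labels visible above.  Resources now taken with devm_*
 * (regs, IRQs, the net_devices themselves) need no explicit labels, which is
 * why clean_ndev_ret and the free_netdev() calls disappear.
 *
 *   clean_unregister_netdev_ret: unregister_netdev(ndev)
 *   clean_cpts:                  cpts_release(), cpdma_ctlr_destroy()
 *   clean_dt_ret:                cpsw_remove_dt(), pm_runtime_put_sync()
 *   clean_runtime_disable_ret:   pm_runtime_disable()
 */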
@@ -3805,9 +2586,6 @@ static int cpsw_remove(struct platform_device *pdev)
cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- if (cpsw->data.dual_emac)
- free_netdev(cpsw->slaves[1].ndev);
- free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 907e05fc22e4..35d602f03281 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -1,15 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/* Texas Instruments Ethernet Switch Driver
*
* Copyright (C) 2013 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __CPSW_H__
#define __CPSW_H__
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 798c989d5d93..84025dcc78d5 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments N-Port Ethernet Switch Address Lookup Engine
*
* Copyright (C) 2012 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/module.h>
@@ -287,6 +280,9 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6];
+ if (cpsw_ale_get_super(ale_entry))
+ continue;
+
cpsw_ale_get_addr(ale_entry, addr);
if (!is_broadcast_ether_addr(addr))
cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
@@ -296,7 +292,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
}
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
@@ -334,7 +329,6 @@ int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_add_ucast);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
int flags, u16 vid)
@@ -350,7 +344,6 @@ int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_del_ucast);
int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid, int mcast_state)
@@ -365,7 +358,7 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
cpsw_ale_set_addr(ale_entry, addr);
- cpsw_ale_set_super(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
cpsw_ale_set_mcast_state(ale_entry, mcast_state);
mask = cpsw_ale_get_port_mask(ale_entry,
@@ -384,7 +377,6 @@ int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_add_mcast);
int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int flags, u16 vid)
@@ -407,7 +399,6 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_del_mcast);
/* ALE NetCP NU switch specific vlan functions */
static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
@@ -458,7 +449,6 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_add_vlan);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
{
@@ -480,40 +470,39 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
cpsw_ale_write(ale, idx, ale_entry);
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_del_vlan);
-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
{
u32 ale_entry[ALE_ENTRY_WORDS];
- int type, idx;
int unreg_mcast = 0;
-
- /* Only bother doing the work if the setting is actually changing */
- if (ale->allmulti == allmulti)
- return;
-
- /* Remember the new setting to check against next time */
- ale->allmulti = allmulti;
+ int type, idx;
for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ int vlan_members;
+
cpsw_ale_read(ale, idx, ale_entry);
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_VLAN)
continue;
+ vlan_members =
+ cpsw_ale_get_vlan_member_list(ale_entry,
+ ale->vlan_field_bits);
+
+ if (port != -1 && !(vlan_members & BIT(port)))
+ continue;
unreg_mcast =
cpsw_ale_get_vlan_unreg_mcast(ale_entry,
ale->vlan_field_bits);
if (allmulti)
- unreg_mcast |= 1;
+ unreg_mcast |= ALE_PORT_HOST;
else
- unreg_mcast &= ~1;
+ unreg_mcast &= ~ALE_PORT_HOST;
cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
ale->vlan_field_bits);
cpsw_ale_write(ale, idx, ale_entry);
}
}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_allmulti);
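/*
 * Not part of the patch: ALE_PORT_HOST replaces the bare "1" of the old code,
 * so it is presumably BIT(0), i.e. the host port, in the unregistered-
 * multicast member mask.  The new "port" argument restricts the walk to VLAN
 * entries whose member list contains that port, with -1 preserving the old
 * walk-everything behaviour; dropping the cached ale->allmulti shortcut makes
 * the update idempotent per port instead of a single global toggle.
 */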
struct ale_control_info {
const char *name;
@@ -739,7 +728,6 @@ int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
return 0;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_control_set);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
{
@@ -763,7 +751,6 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
return tmp & BITMASK(info->bits);
}
-EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
static void cpsw_ale_timer(struct timer_list *t)
{
@@ -788,14 +775,12 @@ void cpsw_ale_start(struct cpsw_ale *ale)
add_timer(&ale->timer);
}
}
-EXPORT_SYMBOL_GPL(cpsw_ale_start);
void cpsw_ale_stop(struct cpsw_ale *ale)
{
del_timer_sync(&ale->timer);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
-EXPORT_SYMBOL_GPL(cpsw_ale_stop);
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
@@ -879,7 +864,6 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
return ale;
}
-EXPORT_SYMBOL_GPL(cpsw_ale_create);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
{
@@ -890,8 +874,3 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
data += ALE_ENTRY_WORDS;
}
}
-EXPORT_SYMBOL_GPL(cpsw_ale_dump);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("TI CPSW ALE driver");
-MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index cd07a3e96d57..370df254eb12 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
*
* Copyright (C) 2012 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __TI_CPSW_ALE_H__
#define __TI_CPSW_ALE_H__
@@ -37,7 +30,6 @@ struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
unsigned long ageout;
- int allmulti;
u32 version;
/* These bits are different on NetCP NU Switch ALE */
u32 port_mask_bits;
@@ -116,7 +108,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
int reg_mcast, int unreg_mcast);
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
-void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
new file mode 100644
index 000000000000..a4a7ec0d2531
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver ethtool intf
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+
+#include "cpsw.h"
+#include "cpts.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "davinci_cpdma.h"
+
+struct cpsw_hw_stats {
+ u32 rxgoodframes;
+ u32 rxbroadcastframes;
+ u32 rxmulticastframes;
+ u32 rxpauseframes;
+ u32 rxcrcerrors;
+ u32 rxaligncodeerrors;
+ u32 rxoversizedframes;
+ u32 rxjabberframes;
+ u32 rxundersizedframes;
+ u32 rxfragments;
+ u32 __pad_0[2];
+ u32 rxoctets;
+ u32 txgoodframes;
+ u32 txbroadcastframes;
+ u32 txmulticastframes;
+ u32 txpauseframes;
+ u32 txdeferredframes;
+ u32 txcollisionframes;
+ u32 txsinglecollframes;
+ u32 txmultcollframes;
+ u32 txexcessivecollisions;
+ u32 txlatecollisions;
+ u32 txunderrun;
+ u32 txcarriersenseerrors;
+ u32 txoctets;
+ u32 octetframes64;
+ u32 octetframes65t127;
+ u32 octetframes128t255;
+ u32 octetframes256t511;
+ u32 octetframes512t1023;
+ u32 octetframes1024tup;
+ u32 netoctets;
+ u32 rxsofoverruns;
+ u32 rxmofoverruns;
+ u32 rxdmaoverruns;
+};
+
+struct cpsw_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+enum {
+ CPSW_STATS,
+ CPDMA_RX_STATS,
+ CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m) CPSW_STATS, \
+ FIELD_SIZEOF(struct cpsw_hw_stats, m), \
+ offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
+ FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+ offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
+ FIELD_SIZEOF(struct cpdma_chan_stats, m), \
+ offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+ { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+ { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+ { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+ { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+ { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+ { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+ { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+ { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+ { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+ { "Rx Fragments", CPSW_STAT(rxfragments) },
+ { "Rx Octets", CPSW_STAT(rxoctets) },
+ { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+ { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+ { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+ { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+ { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+ { "Collisions", CPSW_STAT(txcollisionframes) },
+ { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+ { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+ { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+ { "Late Collisions", CPSW_STAT(txlatecollisions) },
+ { "Tx Underrun", CPSW_STAT(txunderrun) },
+ { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+ { "Tx Octets", CPSW_STAT(txoctets) },
+ { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+ { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+ { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+ { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+ { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+ { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+ { "Net Octets", CPSW_STAT(netoctets) },
+ { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+ { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+ { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+};
+
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+ { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "misqueued", CPDMA_RX_STAT(misqueued) },
+ { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "requeue", CPDMA_RX_STAT(requeue) },
+ { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
+
+u32 cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+void cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ coal->rx_coalesce_usecs = cpsw->coal_intvl;
+ return 0;
+}
+
+int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl;
+ u32 num_interrupts = 0;
+ u32 prescale = 0;
+ u32 addnl_dvdr = 1;
+ u32 coal_intvl = 0;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ int_ctrl = readl(&cpsw->wr_regs->int_control);
+ prescale = cpsw->bus_freq_mhz * 4;
+
+ if (!coal->rx_coalesce_usecs) {
+ int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
+ goto update_return;
+ }
+
+ if (coal_intvl < CPSW_CMINTMIN_INTVL)
+ coal_intvl = CPSW_CMINTMIN_INTVL;
+
+ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+ /* Interrupt pacer works with 4us Pulse, we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+ coal_intvl = (CPSW_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = CPSW_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+ writel(num_interrupts, &cpsw->wr_regs->rx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->tx_imax);
+
+ int_ctrl |= CPSW_INTPACEEN;
+ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+
+update_return:
+ writel(int_ctrl, &cpsw->wr_regs->int_control);
+
+ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+ cpsw->coal_intvl = coal_intvl;
+
+ return 0;
+}
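/*
 * Worked example, not part of the patch (input values assumed; the CMINT
 * min/max clamps are defined elsewhere): with bus_freq_mhz = 250 the
 * prescaler is 250 * 4 = 1000 bus clocks, i.e. the 4us pulse the comment
 * above refers to.  A request for 250us of rx coalescing that survives the
 * clamps keeps addnl_dvdr at 1, so
 *
 *   num_interrupts = (1000 * 1) / 250 = 4
 *
 * and rx_imax/tx_imax are presumably programmed to allow at most 4 paced
 * interrupts per millisecond.  Only a request above CPSW_CMINTMAX_INTVL grows
 * addnl_dvdr and stretches the pulse further via the prescaler.
 */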
+
+int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (CPSW_STATS_COMMON_LEN +
+ (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+ CPSW_STATS_CH_LEN);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
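/*
 * Not part of the patch: with the tables above, CPSW_STATS_COMMON_LEN = 34
 * and CPSW_STATS_CH_LEN = 13, so a device running with one rx and one tx
 * channel reports 34 + 2 * 13 = 60 ethtool statistics, and every additional
 * channel adds another 13 to the ETH_SS_STATS count.
 */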
+
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+ int ch_stats_len;
+ int line;
+ int i;
+
+ ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+ for (i = 0; i < ch_stats_len; i++) {
+ line = i % CPSW_STATS_CH_LEN;
+ snprintf(*p, ETH_GSTRING_LEN,
+ "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
+ (long)(i / CPSW_STATS_CH_LEN),
+ cpsw_gstrings_ch_stats[line].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
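/*
 * Not part of the patch: the snprintf() above yields names such as
 * "Rx DMA chan 0: head_enqueue" or "Tx DMA chan 2: good_dequeue", one block
 * of CPSW_STATS_CH_LEN strings per channel, in the same order as the
 * per-channel values filled in by cpsw_get_ethtool_stats() below.
 */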
+
+void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
+ memcpy(p, cpsw_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+ cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
+ break;
+ }
+}
+
+void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u8 *p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpdma_chan_stats ch_stats;
+ int i, l, ch;
+
+ /* Collect Davinci CPDMA stats for Rx and Tx Channel */
+ for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+ data[l] = readl(cpsw->hw_stats +
+ cpsw_gstrings_stats[l].stat_offset);
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
+
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
+}
+
+void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = priv->rx_pause ? true : false;
+ pause->tx_pause = priv->tx_pause ? true : false;
+}
+
+void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (cpsw->slaves[slave_no].phy)
+ phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
+}
+
+int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_get_regs_len(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+}
+
+void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
+{
+ u32 *reg = p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ /* update CPSW IP version */
+ regs->version = cpsw->version;
+
+ cpsw_ale_dump(cpsw->ale, reg);
+}
+
+int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ ret = pm_runtime_get_sync(cpsw->dev);
+ if (ret < 0) {
+ cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(cpsw->dev);
+ }
+
+ return ret;
+}
+
+void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_put(priv->cpsw->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
+void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_combined = 0;
+ ch->max_other = 0;
+ ch->other_count = 0;
+ ch->rx_count = cpsw->rx_ch_num;
+ ch->tx_count = cpsw->tx_ch_num;
+ ch->combined_count = 0;
+}
+
+int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
+ return 0;
+}
+
+int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
+}
+
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_nway_reset(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void cpsw_suspend_data_pass(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_slave *slave;
+ int i;
+
+ /* Disable NAPI scheduling */
+ cpsw_intr_disable(cpsw);
+
+ /* Stop all transmit queues for every network device.
+ * Mark interfaces dormant so rx descriptors are not re-used.
+ */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ netif_tx_stop_all_queues(slave->ndev);
+ netif_dormant_on(slave->ndev);
+ }
+
+ /* Handle rest of tx packets and stop cpdma channels */
+ cpdma_ctlr_stop(cpsw->dma);
+}
+
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ /* Allow rx packets handling */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_dormant_off(slave->ndev);
+
+ /* Receive is started after this point */
+ if (cpsw->usage_count) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ return ret;
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_tx_start_all_queues(slave->ndev);
+
+ return 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+ struct ethtool_channels *ch)
+{
+ if (cpsw->quirk_irq) {
+ dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
+ return -EOPNOTSUPP;
+ }
+
+ if (ch->combined_count)
+ return -EINVAL;
+
+ /* verify we have at least one channel in each direction */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ if (ch->rx_count > cpsw->data.channels ||
+ ch->tx_count > cpsw->data.channels)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
+ cpdma_handler_fn rx_handler)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void (*handler)(void *, int, int);
+ struct netdev_queue *queue;
+ struct cpsw_vector *vec;
+ int ret, *ch, vch;
+
+ if (rx) {
+ ch = &cpsw->rx_ch_num;
+ vec = cpsw->rxv;
+ handler = rx_handler;
+ } else {
+ ch = &cpsw->tx_ch_num;
+ vec = cpsw->txv;
+ handler = cpsw_tx_handler;
+ }
+
+ while (*ch < ch_num) {
+ vch = rx ? *ch : 7 - *ch;
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
+ queue = netdev_get_tx_queue(priv->ndev, *ch);
+ queue->tx_maxrate = 0;
+
+ if (IS_ERR(vec[*ch].ch))
+ return PTR_ERR(vec[*ch].ch);
+
+ if (!vec[*ch].ch)
+ return -EINVAL;
+
+ cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ (*ch)++;
+ }
+
+ while (*ch > ch_num) {
+ (*ch)--;
+
+ ret = cpdma_chan_destroy(vec[*ch].ch);
+ if (ret)
+ return ret;
+
+ cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ }
+
+ return 0;
+}
+
+int cpsw_set_channels_common(struct net_device *ndev,
+ struct ethtool_channels *chs,
+ cpdma_handler_fn rx_handler)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ cpsw_suspend_data_pass(ndev);
+
+ ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
+ if (ret)
+ goto err;
+
+ ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
+ if (ret)
+ goto err;
+
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+ if (!(slave->ndev && netif_running(slave->ndev)))
+ continue;
+
+ /* Inform stack about new count of queues */
+ ret = netif_set_real_num_tx_queues(slave->ndev,
+ cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err;
+ }
+
+ ret = netif_set_real_num_rx_queues(slave->ndev,
+ cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err;
+ }
+ }
+
+ if (cpsw->usage_count)
+ cpsw_split_res(cpsw);
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+err:
+ dev_err(priv->dev, "cannot update channels number, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
+void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* not supported */
+ ering->tx_max_pending = 0;
+ ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+ ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
+ ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ /* ignore ering->tx_pending - only rx_pending adjustment is supported */
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+ ering->rx_pending < CPSW_MAX_QUEUES ||
+ ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
+ return -EINVAL;
+
+ if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
+ return 0;
+
+ cpsw_suspend_data_pass(ndev);
+
+ cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+
+ if (cpsw->usage_count)
+ cpdma_chan_split_pool(cpsw->dma);
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+
+ dev_err(cpsw->dev, "cannot set ring params, closing device\n");
+ dev_close(ndev);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = cpsw->cpts->phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
+#else
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+ return 0;
+}
+#endif
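
The ethtool helpers above are deliberately generic so both CPSW driver variants can share one implementation. As a rough illustration only (not part of this patch, and the exact callback set chosen by cpsw.c may differ), a consumer would typically wire them into its ethtool_ops table along these lines; the table would then be assigned to ndev->ethtool_ops before register_netdev().

#include <linux/ethtool.h>

#include "cpsw_priv.h"

/* Hypothetical consumer table -- the field names are real struct ethtool_ops
 * members, but this particular selection is an example, not the driver's own.
 */
static const struct ethtool_ops example_cpsw_ethtool_ops = {
	.begin			= cpsw_ethtool_op_begin,
	.complete		= cpsw_ethtool_op_complete,
	.get_msglevel		= cpsw_get_msglevel,
	.set_msglevel		= cpsw_set_msglevel,
	.get_link_ksettings	= cpsw_get_link_ksettings,
	.set_link_ksettings	= cpsw_set_link_ksettings,
	.get_pauseparam		= cpsw_get_pauseparam,
	.get_wol		= cpsw_get_wol,
	.set_wol		= cpsw_set_wol,
	.get_regs_len		= cpsw_get_regs_len,
	.get_regs		= cpsw_get_regs,
	.get_channels		= cpsw_get_channels,
	.get_ringparam		= cpsw_get_ringparam,
	.set_ringparam		= cpsw_set_ringparam,
	.get_ts_info		= cpsw_get_ts_info,
	.nway_reset		= cpsw_nway_reset,
};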
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
new file mode 100644
index 000000000000..476d050a022c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "cpts.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "davinci_cpdma.h"
+
+int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
+ int ale_ageout, phys_addr_t desc_mem_phys,
+ int descs_pool_size)
+{
+ u32 slave_offset, sliver_offset, slave_size;
+ struct cpsw_ale_params ale_params;
+ struct cpsw_platform_data *data;
+ struct cpdma_params dma_params;
+ struct device *dev = cpsw->dev;
+ void __iomem *cpts_regs;
+ int ret = 0, i;
+
+ data = &cpsw->data;
+ cpsw->rx_ch_num = 1;
+ cpsw->tx_ch_num = 1;
+
+ cpsw->version = readl(&cpsw->regs->id_ver);
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ memset(&ale_params, 0, sizeof(ale_params));
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
+ dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
+ slave_offset = CPSW1_SLAVE_OFFSET;
+ slave_size = CPSW1_SLAVE_SIZE;
+ sliver_offset = CPSW1_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = 0;
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
+ dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
+ slave_offset = CPSW2_SLAVE_OFFSET;
+ slave_size = CPSW2_SLAVE_SIZE;
+ sliver_offset = CPSW2_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = desc_mem_phys;
+ break;
+ default:
+ dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ void __iomem *regs = cpsw->regs;
+
+ slave->slave_num = i;
+ slave->data = &cpsw->data.slave_data[i];
+ slave->regs = regs + slave_offset;
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
+ if (IS_ERR(slave->mac_sl))
+ return PTR_ERR(slave->mac_sl);
+
+ slave_offset += slave_size;
+ sliver_offset += SLIVER_SIZE;
+ }
+
+ ale_params.dev = dev;
+ ale_params.ale_ageout = ale_ageout;
+ ale_params.ale_entries = data->ale_entries;
+ ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
+
+ cpsw->ale = cpsw_ale_create(&ale_params);
+ if (!cpsw->ale) {
+ dev_err(dev, "error initializing ale engine\n");
+ return -ENODEV;
+ }
+
+ dma_params.dev = dev;
+ dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
+ dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
+ dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
+ dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
+ dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
+
+ dma_params.num_chan = data->channels;
+ dma_params.has_soft_reset = true;
+ dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
+ dma_params.desc_mem_size = data->bd_ram_size;
+ dma_params.desc_align = 16;
+ dma_params.has_ext_regs = true;
+ dma_params.desc_hw_addr = dma_params.desc_mem_phys;
+ dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
+ dma_params.descs_pool_size = descs_pool_size;
+
+ cpsw->dma = cpdma_ctlr_create(&dma_params);
+ if (!cpsw->dma) {
+ dev_err(dev, "error initializing dma\n");
+ return -ENOMEM;
+ }
+
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
+ if (IS_ERR(cpsw->cpts)) {
+ ret = PTR_ERR(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ }
+
+ return ret;
+}
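
cpsw_init_common() concentrates the version-dependent register layout plus the ALE, CPDMA and CPTS setup in one place. A hedged sketch of the intended calling convention follows; the argument values are placeholders (the real drivers derive them from DT properties and the subsystem memory resource), and only the error-handling shape matters here.

#include <linux/device.h>

#include "cpsw_priv.h"

/* Hypothetical probe fragment: cpsw and ss_regs are assumed to be already
 * allocated and mapped by the caller; the numeric arguments are placeholders.
 */
static int example_cpsw_probe_init(struct cpsw_common *cpsw,
				   void __iomem *ss_regs,
				   phys_addr_t bd_ram_phys)
{
	int ret;

	ret = cpsw_init_common(cpsw, ss_regs, 10 /* ale_ageout, seconds */,
			       bd_ram_phys,
			       CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT);
	if (ret)
		dev_err(cpsw->dev, "cpsw common init failed: %d\n", ret);

	return ret;
}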
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
new file mode 100644
index 000000000000..04795b97ee71
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+
+#include "davinci_cpdma.h"
+
+#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
+ NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
+ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_RX_STATUS)
+
+#define cpsw_info(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_info(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_err(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_err(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_dbg(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_dbg(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_notice(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_notice(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define ALE_ALL_PORTS 0x7
+
+#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
+#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
+#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+
+#define CPSW_VERSION_1 0x19010a
+#define CPSW_VERSION_2 0x19010c
+#define CPSW_VERSION_3 0x19010f
+#define CPSW_VERSION_4 0x190112
+
+#define HOST_PORT_NUM 0
+#define CPSW_ALE_PORTS_NUM 3
+#define SLIVER_SIZE 0x40
+
+#define CPSW1_HOST_PORT_OFFSET 0x028
+#define CPSW1_SLAVE_OFFSET 0x050
+#define CPSW1_SLAVE_SIZE 0x040
+#define CPSW1_CPDMA_OFFSET 0x100
+#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_HW_STATS 0x400
+#define CPSW1_CPTS_OFFSET 0x500
+#define CPSW1_ALE_OFFSET 0x600
+#define CPSW1_SLIVER_OFFSET 0x700
+
+#define CPSW2_HOST_PORT_OFFSET 0x108
+#define CPSW2_SLAVE_OFFSET 0x200
+#define CPSW2_SLAVE_SIZE 0x100
+#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_HW_STATS 0x900
+#define CPSW2_STATERAM_OFFSET 0xa00
+#define CPSW2_CPTS_OFFSET 0xc00
+#define CPSW2_ALE_OFFSET 0xd00
+#define CPSW2_SLIVER_OFFSET 0xd80
+#define CPSW2_BD_OFFSET 0x2000
+
+#define CPDMA_RXTHRESH 0x0c0
+#define CPDMA_RXFREE 0x0e0
+#define CPDMA_TXHDP 0x00
+#define CPDMA_RXHDP 0x20
+#define CPDMA_TXCP 0x40
+#define CPDMA_RXCP 0x60
+
+#define CPSW_POLL_WEIGHT 64
+#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
+#define CPSW_MIN_PACKET_SIZE (VLAN_ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
+ ETH_FCS_LEN +\
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE)
+
+#define RX_PRIORITY_MAPPING 0x76543210
+#define TX_PRIORITY_MAPPING 0x33221100
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
+
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_RX_VLAN_ENCAP BIT(2)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 16)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
+
+#define CPSW_INTPACEEN (0x3f << 16)
+#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
+#define CPSW_CMINTMAX_CNT 63
+#define CPSW_CMINTMIN_CNT 2
+#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
+
+#define IRQ_NUM 2
+#define CPSW_MAX_QUEUES 8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
+#define CPSW_FIFO_SHAPE_EN_SHIFT 16
+#define CPSW_FIFO_RATE_EN_SHIFT 20
+#define CPSW_TC_NUM 4
+#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK 0x7f
+
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
+#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0)
+enum {
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
+};
+
+struct cpsw_wr_regs {
+ u32 id_ver;
+ u32 soft_reset;
+ u32 control;
+ u32 int_control;
+ u32 rx_thresh_en;
+ u32 rx_en;
+ u32 tx_en;
+ u32 misc_en;
+ u32 mem_allign1[8];
+ u32 rx_thresh_stat;
+ u32 rx_stat;
+ u32 tx_stat;
+ u32 misc_stat;
+ u32 mem_allign2[8];
+ u32 rx_imax;
+ u32 tx_imax;
+
+};
+
+struct cpsw_ss_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 vlan_ltype;
+ u32 ts_ltype;
+ u32 dlr_ltype;
+};
+
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL 0x00 /* Control Register */
+#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
+
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
+
+#define CTRL_V3_TS_BITS \
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN BIT(0)
+#define CPSW_V1_TS_TX_EN BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS 16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
+
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
+struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 tx_in_ctl;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+ u32 cpdma_rx_chan_map;
+};
+
+struct cpsw_slave_data {
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ int phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+ struct phy *ifphy;
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 ale_entries; /* ale table size */
+ u32 bd_ram_size; /* buffer descriptor ram size */
+ u32 mac_control; /* MAC control register */
+ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode */
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
+struct cpsw_slave {
+ void __iomem *regs;
+ int slave_num;
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+ struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ struct cpsw_sl *mac_sl;
+};
+
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+ return readl_relaxed(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+ writel_relaxed(val, slave->regs + offset);
+}
+
+struct cpsw_vector {
+ struct cpdma_chan *ch;
+ int budget;
+};
+
+struct cpsw_common {
+ struct device *dev;
+ struct cpsw_platform_data data;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ struct cpsw_ss_regs __iomem *regs;
+ struct cpsw_wr_regs __iomem *wr_regs;
+ u8 __iomem *hw_stats;
+ struct cpsw_host_regs __iomem *host_port_regs;
+ u32 version;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
+ int rx_packet_max;
+ int descs_pool_size;
+ struct cpsw_slave *slaves;
+ struct cpdma_ctlr *dma;
+ struct cpsw_vector txv[CPSW_MAX_QUEUES];
+ struct cpsw_vector rxv[CPSW_MAX_QUEUES];
+ struct cpsw_ale *ale;
+ bool quirk_irq;
+ bool rx_irq_disabled;
+ bool tx_irq_disabled;
+ u32 irqs_table[IRQ_NUM];
+ struct cpts *cpts;
+ int rx_ch_num, tx_ch_num;
+ int speed;
+ int usage_count;
+};
+
+struct cpsw_priv {
+ struct net_device *ndev;
+ struct device *dev;
+ u32 msg_enable;
+ u8 mac_addr[ETH_ALEN];
+ bool rx_pause;
+ bool tx_pause;
+ bool mqprio_hw;
+ int fifo_bw[CPSW_TC_NUM];
+ int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
+ u32 emac_port;
+ struct cpsw_common *cpsw;
+};
+
+#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
+#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
+
+#define cpsw_slave_index(cpsw, priv) \
+ ((cpsw->data.dual_emac) ? priv->emac_port : \
+ cpsw->data.active_slave)
+
+static inline int cpsw_get_slave_port(u32 slave_num)
+{
+ return slave_num + 1;
+}
+
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
+ int ale_ageout, phys_addr_t desc_mem_phys,
+ int descs_pool_size);
+void cpsw_split_res(struct cpsw_common *cpsw);
+int cpsw_fill_rx_channels(struct cpsw_priv *priv);
+void cpsw_intr_enable(struct cpsw_common *cpsw);
+void cpsw_intr_disable(struct cpsw_common *cpsw);
+void cpsw_tx_handler(void *token, int len, int status);
+
+/* ethtool */
+u32 cpsw_get_msglevel(struct net_device *ndev);
+void cpsw_set_msglevel(struct net_device *ndev, u32 value);
+int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal);
+int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal);
+int cpsw_get_sset_count(struct net_device *ndev, int sset);
+void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data);
+void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data);
+void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause);
+void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol);
+int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol);
+int cpsw_get_regs_len(struct net_device *ndev);
+void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p);
+int cpsw_ethtool_op_begin(struct net_device *ndev);
+void cpsw_ethtool_op_complete(struct net_device *ndev);
+void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch);
+int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd);
+int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_nway_reset(struct net_device *ndev);
+void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering);
+int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering);
+int cpsw_set_channels_common(struct net_device *ndev,
+ struct ethtool_channels *chs,
+ cpdma_handler_fn rx_handler);
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw_sl.c b/drivers/net/ethernet/ti/cpsw_sl.c
new file mode 100644
index 000000000000..0c7531cb0f39
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_sl.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/
+ * Ethernet MAC Sliver (CPGMAC_SL)
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "cpsw_sl.h"
+
+#define CPSW_SL_REG_NOTUSED U16_MAX
+
+static const u16 cpsw_sl_reg_map_cpsw[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = 0x14,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = 0x24,
+ [CPSW_SL_TX_GAP] = 0x28,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2hk[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = 0x24,
+ [CPSW_SL_TX_GAP] = CPSW_SL_REG_NOTUSED,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2x_xgbe[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_TX_GAP] = 0x28,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2elg_am65[] = {
+ [CPSW_SL_IDVER] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_MACCONTROL] = 0x00,
+ [CPSW_SL_MACSTATUS] = 0x04,
+ [CPSW_SL_SOFT_RESET] = 0x08,
+ [CPSW_SL_RX_MAXLEN] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_BOFFTEST] = 0x0c,
+ [CPSW_SL_RX_PAUSE] = 0x10,
+ [CPSW_SL_TX_PAUSE] = 0x40,
+ [CPSW_SL_EMCONTROL] = 0x70,
+ [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_TX_GAP] = 0x74,
+};
+
+#define CPSW_SL_SOFT_RESET_BIT BIT(0)
+
+#define CPSW_SL_STATUS_PN_IDLE BIT(31)
+#define CPSW_SL_AM65_STATUS_PN_E_IDLE BIT(30)
+#define CPSW_SL_AM65_STATUS_PN_P_IDLE BIT(29)
+#define CPSW_SL_AM65_STATUS_PN_TX_IDLE BIT(28)
+
+#define CPSW_SL_STATUS_IDLE_MASK_BASE (CPSW_SL_STATUS_PN_IDLE)
+
+#define CPSW_SL_STATUS_IDLE_MASK_K3 \
+ (CPSW_SL_STATUS_IDLE_MASK_BASE | CPSW_SL_AM65_STATUS_PN_E_IDLE | \
+ CPSW_SL_AM65_STATUS_PN_P_IDLE | CPSW_SL_AM65_STATUS_PN_TX_IDLE)
+
+#define CPSW_SL_CTL_FUNC_BASE \
+ (CPSW_SL_CTL_FULLDUPLEX |\
+ CPSW_SL_CTL_LOOPBACK |\
+ CPSW_SL_CTL_RX_FLOW_EN |\
+ CPSW_SL_CTL_TX_FLOW_EN |\
+ CPSW_SL_CTL_GMII_EN |\
+ CPSW_SL_CTL_TX_PACE |\
+ CPSW_SL_CTL_GIG |\
+ CPSW_SL_CTL_CMD_IDLE |\
+ CPSW_SL_CTL_IFCTL_A |\
+ CPSW_SL_CTL_IFCTL_B |\
+ CPSW_SL_CTL_GIG_FORCE |\
+ CPSW_SL_CTL_EXT_EN |\
+ CPSW_SL_CTL_RX_CEF_EN |\
+ CPSW_SL_CTL_RX_CSF_EN |\
+ CPSW_SL_CTL_RX_CMF_EN)
+
+struct cpsw_sl {
+ struct device *dev;
+ void __iomem *sl_base;
+ const u16 *regs;
+ u32 control_features;
+ u32 idle_mask;
+};
+
+struct cpsw_sl_dev_id {
+ const char *device_id;
+ const u16 *regs;
+ const u32 control_features;
+ const u32 regs_offset;
+ const u32 idle_mask;
+};
+
+static const struct cpsw_sl_dev_id cpsw_sl_id_match[] = {
+ {
+ .device_id = "cpsw",
+ .regs = cpsw_sl_reg_map_cpsw,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_TX_SG_LIM_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2hk",
+ .regs = cpsw_sl_reg_map_66ak2hk,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2x_xgbe",
+ .regs = cpsw_sl_reg_map_66ak2x_xgbe,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_XGIG |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_XGMII_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2el",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO |
+ CPSW_SL_CTL_TX_SG_LIM_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2g",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO,
+ },
+ {
+ .device_id = "am65",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_XGIG |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_XGMII_EN |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO |
+ CPSW_SL_CTL_TX_SG_LIM_EN |
+ CPSW_SL_CTL_EXT_EN_XGIG,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_K3,
+ },
+ { },
+};
+
+u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg)
+{
+ int val;
+
+ if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) {
+ dev_err(sl->dev, "cpsw_sl: not sup r reg: %04X\n",
+ sl->regs[reg]);
+ return 0;
+ }
+
+ val = readl(sl->sl_base + sl->regs[reg]);
+ dev_dbg(sl->dev, "cpsw_sl: reg: %04X r 0x%08X\n", sl->regs[reg], val);
+ return val;
+}
+
+void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val)
+{
+ if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) {
+ dev_err(sl->dev, "cpsw_sl: not sup w reg: %04X\n",
+ sl->regs[reg]);
+ return;
+ }
+
+ dev_dbg(sl->dev, "cpsw_sl: reg: %04X w 0x%08X\n", sl->regs[reg], val);
+ writel(val, sl->sl_base + sl->regs[reg]);
+}
+
+static const struct cpsw_sl_dev_id *cpsw_sl_match_id(
+ const struct cpsw_sl_dev_id *id,
+ const char *device_id)
+{
+ if (!id || !device_id)
+ return NULL;
+
+ while (id->device_id) {
+ if (strcmp(device_id, id->device_id) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev,
+ void __iomem *sl_base)
+{
+ const struct cpsw_sl_dev_id *sl_dev_id;
+ struct cpsw_sl *sl;
+
+ sl = devm_kzalloc(dev, sizeof(struct cpsw_sl), GFP_KERNEL);
+ if (!sl)
+ return ERR_PTR(-ENOMEM);
+ sl->dev = dev;
+ sl->sl_base = sl_base;
+
+ sl_dev_id = cpsw_sl_match_id(cpsw_sl_id_match, device_id);
+ if (!sl_dev_id) {
+ dev_err(sl->dev, "cpsw_sl: dev_id %s not found.\n", device_id);
+ return ERR_PTR(-EINVAL);
+ }
+ sl->regs = sl_dev_id->regs;
+ sl->control_features = sl_dev_id->control_features;
+ sl->idle_mask = sl_dev_id->idle_mask;
+ sl->sl_base += sl_dev_id->regs_offset;
+
+ return sl;
+}
+
+void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(tmo);
+
+ /* Set the soft reset bit */
+ cpsw_sl_reg_write(sl, CPSW_SL_SOFT_RESET, CPSW_SL_SOFT_RESET_BIT);
+
+ /* Wait for the bit to clear */
+ do {
+ usleep_range(100, 200);
+ } while ((cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) &
+ CPSW_SL_SOFT_RESET_BIT) &&
+ time_after(timeout, jiffies));
+
+ if (cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & CPSW_SL_SOFT_RESET_BIT)
+ dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n");
+}
+
+u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs)
+{
+ u32 val;
+
+ if (ctl_funcs & ~sl->control_features) {
+ dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n",
+ ctl_funcs & (~sl->control_features));
+ return -EINVAL;
+ }
+
+ val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL);
+ val |= ctl_funcs;
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val);
+
+ return 0;
+}
+
+u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs)
+{
+ u32 val;
+
+ if (ctl_funcs & ~sl->control_features) {
+ dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n",
+ ctl_funcs & (~sl->control_features));
+ return -EINVAL;
+ }
+
+ val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL);
+ val &= ~ctl_funcs;
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val);
+
+ return 0;
+}
+
+void cpsw_sl_ctl_reset(struct cpsw_sl *sl)
+{
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, 0);
+}
+
+int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(tmo);
+
+ do {
+ usleep_range(100, 200);
+ } while (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) &
+ sl->idle_mask) && time_after(timeout, jiffies));
+
+ if (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & sl->idle_mask)) {
+ dev_err(sl->dev, "cpsw_sl failed to reach idle.\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/cpsw_sl.h b/drivers/net/ethernet/ti/cpsw_sl.h
new file mode 100644
index 000000000000..a6d06a5a420f
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_sl.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/
+ * Ethernet MAC Sliver (CPGMAC_SL) APIs
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#ifndef __TI_CPSW_SL_H__
+#define __TI_CPSW_SL_H__
+
+#include <linux/device.h>
+
+enum cpsw_sl_regs {
+ CPSW_SL_IDVER,
+ CPSW_SL_MACCONTROL,
+ CPSW_SL_MACSTATUS,
+ CPSW_SL_SOFT_RESET,
+ CPSW_SL_RX_MAXLEN,
+ CPSW_SL_BOFFTEST,
+ CPSW_SL_RX_PAUSE,
+ CPSW_SL_TX_PAUSE,
+ CPSW_SL_EMCONTROL,
+ CPSW_SL_RX_PRI_MAP,
+ CPSW_SL_TX_GAP,
+};
+
+enum {
+ CPSW_SL_CTL_FULLDUPLEX = BIT(0), /* Full Duplex mode */
+ CPSW_SL_CTL_LOOPBACK = BIT(1), /* Loop Back Mode */
+ CPSW_SL_CTL_MTEST = BIT(2), /* Manufacturing Test mode */
+ CPSW_SL_CTL_RX_FLOW_EN = BIT(3), /* Receive Flow Control Enable */
+ CPSW_SL_CTL_TX_FLOW_EN = BIT(4), /* Transmit Flow Control Enable */
+ CPSW_SL_CTL_GMII_EN = BIT(5), /* GMII Enable */
+ CPSW_SL_CTL_TX_PACE = BIT(6), /* Transmit Pacing Enable */
+ CPSW_SL_CTL_GIG = BIT(7), /* Gigabit Mode */
+ CPSW_SL_CTL_XGIG = BIT(8), /* 10 Gigabit Mode */
+ CPSW_SL_CTL_TX_SHORT_GAP_EN = BIT(10), /* Transmit Short Gap Enable */
+ CPSW_SL_CTL_CMD_IDLE = BIT(11), /* Command Idle */
+ CPSW_SL_CTL_CRC_TYPE = BIT(12), /* Port CRC Type */
+ CPSW_SL_CTL_XGMII_EN = BIT(13), /* XGMII Enable */
+ CPSW_SL_CTL_IFCTL_A = BIT(15), /* Interface Control A */
+ CPSW_SL_CTL_IFCTL_B = BIT(16), /* Interface Control B */
+ CPSW_SL_CTL_GIG_FORCE = BIT(17), /* Gigabit Mode Force */
+ CPSW_SL_CTL_EXT_EN = BIT(18), /* External Control Enable */
+ CPSW_SL_CTL_EXT_EN_RX_FLO = BIT(19), /* Ext RX Flow Control Enable */
+ CPSW_SL_CTL_EXT_EN_TX_FLO = BIT(20), /* Ext TX Flow Control Enable */
+ CPSW_SL_CTL_TX_SG_LIM_EN = BIT(21), /* TX Short Gap Limit Enable */
+ CPSW_SL_CTL_RX_CEF_EN = BIT(22), /* RX Copy Error Frames Enable */
+ CPSW_SL_CTL_RX_CSF_EN = BIT(23), /* RX Copy Short Frames Enable */
+ CPSW_SL_CTL_RX_CMF_EN = BIT(24), /* RX Copy MAC Control Frames Enable */
+ CPSW_SL_CTL_EXT_EN_XGIG = BIT(25), /* Ext XGIG Control En, k3 only */
+
+ CPSW_SL_CTL_FUNCS_COUNT
+};
+
+struct cpsw_sl;
+
+struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev,
+ void __iomem *sl_base);
+
+void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo);
+
+u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs);
+u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs);
+void cpsw_sl_ctl_reset(struct cpsw_sl *sl);
+int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo);
+
+u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg);
+void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val);
+
+#endif /* __TI_CPSW_SL_H__ */
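
The CPGMAC_SL API above is stateless apart from the cpsw_sl handle, and it is meant to be used in a fixed order: look the sliver up by device_id, soft-reset it, program MACCONTROL, and quiesce it again before teardown. A minimal sketch of that order, with example timeouts and control bits (not taken verbatim from the driver), might look like this:

#include <linux/err.h>

#include "cpsw_sl.h"

/* Illustrative bring-up: enable the MAC after a clean soft reset. */
static int example_sliver_bringup(struct device *dev, void __iomem *sl_regs)
{
	struct cpsw_sl *sl;

	sl = cpsw_sl_get("cpsw", dev, sl_regs);
	if (IS_ERR(sl))
		return PTR_ERR(sl);

	cpsw_sl_reset(sl, 100);		/* soft-reset, 100 ms timeout */
	cpsw_sl_ctl_reset(sl);		/* start from a clean MACCONTROL */
	cpsw_sl_ctl_set(sl, CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_FULLDUPLEX);

	return 0;
}

/* Illustrative teardown: request idle, wait for it, then clear MACCONTROL. */
static void example_sliver_teardown(struct cpsw_sl *sl)
{
	cpsw_sl_ctl_set(sl, CPSW_SL_CTL_CMD_IDLE);
	cpsw_sl_wait_for_idle(sl, 100);
	cpsw_sl_ctl_reset(sl);
}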
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 2a9ba4acd7fa..e257018ada71 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -1,21 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* TI Common Platform Time Sync
*
* Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/err.h>
#include <linux/if.h>
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index d2c7decd59b6..024aab6af12f 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -1,21 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* TI Common Platform Time Sync
*
* Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _TI_CPTS_H_
#define _TI_CPTS_H_
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4236dcdd5634..35bf14d8e7af 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -1,16 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Texas Instruments CPDMA Driver
*
* Copyright (C) 2010 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
@@ -527,7 +520,6 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->num_chan = CPDMA_MAX_CHANNELS;
return ctlr;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
@@ -588,7 +580,6 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
@@ -621,7 +612,6 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
@@ -639,7 +629,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_desc_pool_destroy(ctlr);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
@@ -660,25 +649,21 @@ int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
-EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
-EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
int rx, int desc_num,
@@ -774,7 +759,6 @@ int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
/* cpdma_chan_set_weight - set weight of a channel in percentage.
@@ -807,7 +791,6 @@ int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
* Should be called before cpdma_chan_set_rate.
@@ -822,7 +805,6 @@ u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
return DIV_ROUND_UP(divident, divisor);
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
* The bandwidth * limited channels have to be in order beginning from lowest.
@@ -867,7 +849,6 @@ err:
spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
@@ -880,7 +861,6 @@ u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
return rate;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler, int rx_type)
@@ -940,7 +920,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_create);
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
@@ -953,7 +932,6 @@ int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
return desc_num;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
@@ -975,7 +953,6 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
spin_unlock_irqrestore(&ctlr->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats)
@@ -988,7 +965,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
static void __cpdma_chan_submit(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc)
@@ -1095,7 +1071,6 @@ unlock_ret:
spin_unlock_irqrestore(&chan->lock, flags);
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_submit);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
@@ -1110,7 +1085,6 @@ bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
spin_unlock_irqrestore(&chan->lock, flags);
return free_tx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
static void __cpdma_chan_free(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc,
@@ -1204,7 +1178,6 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
}
return used;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_process);
int cpdma_chan_start(struct cpdma_chan *chan)
{
@@ -1224,7 +1197,6 @@ int cpdma_chan_start(struct cpdma_chan *chan)
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_start);
int cpdma_chan_stop(struct cpdma_chan *chan)
{
@@ -1287,7 +1259,6 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
-EXPORT_SYMBOL_GPL(cpdma_chan_stop);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
@@ -1329,25 +1300,19 @@ int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
return ret;
}
-EXPORT_SYMBOL_GPL(cpdma_control_set);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
return ctlr->num_rx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
return ctlr->num_tx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);
void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
ctlr->num_rx_desc = num_rx_desc;
ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
}
-EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index d399af5389b8..10376062dafa 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -1,16 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Texas Instruments CPDMA Driver
*
* Copyright (C) 2010 Texas Instruments
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __DAVINCI_CPDMA_H__
#define __DAVINCI_CPDMA_H__
@@ -34,8 +27,8 @@ struct cpdma_params {
int num_chan;
bool has_soft_reset;
int min_packet_size;
- u32 desc_mem_phys;
- u32 desc_hw_addr;
+ dma_addr_t desc_mem_phys;
+ dma_addr_t desc_hw_addr;
int desc_mem_size;
int desc_align;
u32 bus_freq_mhz;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 57450b174fc4..4bf65cab79e6 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* DaVinci Ethernet Medium Access Controller
*
@@ -6,21 +7,6 @@
* Copyright (C) 2009 Texas Instruments.
*
* ---------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * ---------------------------------------------------------------------------
* History:
* 0-5 A number of folks worked on this driver in bits and pieces but the major
* contribution came from Suraj Iyer and Anant Gole
@@ -1714,7 +1700,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
if (!is_valid_ether_addr(pdata->mac_addr)) {
mac_addr = of_get_mac_address(np);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(pdata->mac_addr, mac_addr);
}
@@ -1912,15 +1898,11 @@ static int davinci_emac_probe(struct platform_device *pdev)
ether_addr_copy(ndev->dev_addr, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
- /* Try nvmem if MAC wasn't passed over pdata or DT. */
- rc = nvmem_get_mac_address(&pdev->dev, priv->mac_addr);
- if (rc) {
- /* Use random MAC if still none obtained. */
- eth_hw_addr_random(ndev);
- memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
- dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
- priv->mac_addr);
- }
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
}
ndev->netdev_ops = &emac_netdev_ops;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a98aedae1b41..38b7f6d35759 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* DaVinci MDIO Module driver
*
@@ -7,22 +8,6 @@
*
* Copyright (C) 2009 Texas Instruments.
*
- * ---------------------------------------------------------------------------
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * ---------------------------------------------------------------------------
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -140,7 +125,7 @@ static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
static void davinci_mdio_enable(struct davinci_mdio_data *data)
{
/* set enable and clock divider */
- __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+ writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
static int davinci_mdio_reset(struct mii_bus *bus)
@@ -159,7 +144,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
msleep(PHY_MAX_ADDR * data->access_time);
/* dump hardware version info */
- ver = __raw_readl(&data->regs->version);
+ ver = readl(&data->regs->version);
dev_info(data->dev,
"davinci mdio revision %d.%d, bus freq %ld\n",
(ver >> 8) & 0xff, ver & 0xff,
@@ -169,7 +154,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
goto done;
/* get phy mask from the alive register */
- phy_mask = __raw_readl(&data->regs->alive);
+ phy_mask = readl(&data->regs->alive);
if (phy_mask) {
/* restrict mdio bus to live phys only */
dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
@@ -196,11 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
u32 reg;
while (time_after(timeout, jiffies)) {
- reg = __raw_readl(&regs->user[0].access);
+ reg = readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
- reg = __raw_readl(&regs->control);
+ reg = readl(&regs->control);
if ((reg & CONTROL_IDLE) == 0) {
usleep_range(100, 200);
continue;
@@ -216,7 +201,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
return -EAGAIN;
}
- reg = __raw_readl(&regs->user[0].access);
+ reg = readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
@@ -263,7 +248,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (ret < 0)
break;
- __raw_writel(reg, &data->regs->user[0].access);
+ writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
@@ -271,7 +256,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (ret < 0)
break;
- reg = __raw_readl(&data->regs->user[0].access);
+ reg = readl(&data->regs->user[0].access);
ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
break;
}
@@ -307,7 +292,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
break;
- __raw_writel(reg, &data->regs->user[0].access);
+ writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
@@ -412,9 +397,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs))
- return PTR_ERR(data->regs);
+ data->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!data->regs)
+ return -ENOMEM;
davinci_mdio_init_clk(data);
@@ -472,9 +457,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
u32 ctrl;
/* shutdown the scan state machine */
- ctrl = __raw_readl(&data->regs->control);
+ ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
- __raw_writel(ctrl, &data->regs->control);
+ writel(ctrl, &data->regs->control);
wait_for_idle(data);
return 0;
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index c4ffdf47bad5..43d5cd59b56b 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* NetCP driver local header
*
@@ -8,15 +9,6 @@
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
* Murali Karicheri <m-karicheri2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __NETCP_H__
#define __NETCP_H__
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index d847f672a705..642843945031 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Keystone NetCP Core driver
*
@@ -8,15 +9,6 @@
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Murali Karicheri <m-karicheri2@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
@@ -2045,7 +2037,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
devm_release_mem_region(dev, res.start, size);
} else {
mac_addr = of_get_mac_address(node_interface);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(ndev->dev_addr, mac_addr);
else
eth_random_addr(ndev->dev_addr);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0a920c5936b2..ec179700c184 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Keystone GBE and XGBE subsystem code
*
@@ -7,15 +8,6 @@
* Cyril Chemparathy <cyril@ti.com>
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index 5d8419f658d0..f7cf56d6351d 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SGMI module initialisation
*
@@ -6,14 +7,6 @@
* Sandeep Paulraj <s-paulraj@ti.com>
* Wingman Kwok <w-kwok2@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "netcp.h"
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
index 33571acc52b6..112778aedd8a 100644
--- a/drivers/net/ethernet/ti/netcp_xgbepcsr.c
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* XGE PCSR module initialisation
*
@@ -5,14 +6,6 @@
* Authors: Sandeep Nair <sandeep_n@ti.com>
* WingMan Kwok <w-kwok2@ti.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include "netcp.h"
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 1713c2d2dccf..8788953eaafd 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -1158,7 +1158,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
INIT_WORK(&priv->setrx_work, w5100_setrx_work);
INIT_WORK(&priv->restart_work, w5100_restart_work);
- if (mac_addr)
+ if (!IS_ERR_OR_NULL(mac_addr))
memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
else
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index da4ec575ccf9..db448fad621b 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -33,8 +33,7 @@ config XILINX_AXI_EMAC
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
- depends on (PPC || MICROBLAZE)
- depends on !64BIT || BROKEN
+ depends on PPC || MICROBLAZE || X86 || COMPILE_TEST
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 107575225383..1aeda084b8f1 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -334,6 +334,9 @@ struct temac_local {
/* Connection to PHY device */
struct device_node *phy_node;
+ /* For non-device-tree devices */
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ phy_interface_t phy_interface;
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
@@ -344,8 +347,10 @@ struct temac_local {
#ifdef CONFIG_PPC_DCR
dcr_host_t sdma_dcrs;
#endif
- u32 (*dma_in)(struct temac_local *, int);
- void (*dma_out)(struct temac_local *, int, u32);
+ u32 (*temac_ior)(struct temac_local *lp, int offset);
+ void (*temac_iow)(struct temac_local *lp, int offset, u32 value);
+ u32 (*dma_in)(struct temac_local *lp, int reg);
+ void (*dma_out)(struct temac_local *lp, int reg, u32 value);
int tx_irq;
int rx_irq;
@@ -353,7 +358,10 @@ struct temac_local {
struct sk_buff **rx_skb;
spinlock_t rx_lock;
- struct mutex indirect_mutex;
+ /* For synchronization of indirect register access. Must be
+ /* For synchronization of indirect register access. Must be
+ * a shared mutex between interfaces in the same TEMAC block.
+ */
+ struct mutex *indirect_mutex;
u32 options; /* Current options word */
int last_link;
unsigned int temac_features;
@@ -367,18 +375,24 @@ struct temac_local {
int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
+
+ /* DMA channel control setup */
+ u32 tx_chnl_ctrl;
+ u32 rx_chnl_ctrl;
};
+/* Wrappers for temac_ior()/temac_iow() function pointers above */
+#define temac_ior(lp, o) ((lp)->temac_ior(lp, o))
+#define temac_iow(lp, o, v) ((lp)->temac_iow(lp, o, v))
+
/* xilinx_temac.c */
-u32 temac_ior(struct temac_local *lp, int offset);
-void temac_iow(struct temac_local *lp, int offset, u32 value);
int temac_indirect_busywait(struct temac_local *lp);
u32 temac_indirect_in32(struct temac_local *lp, int reg);
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
/* xilinx_temac_mdio.c */
-int temac_mdio_setup(struct temac_local *lp, struct device_node *np);
+int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
void temac_mdio_teardown(struct temac_local *lp);
#endif /* XILINX_LL_TEMAC_H */
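
The header change above turns temac_ior()/temac_iow() into per-instance function pointers behind wrapper macros, so one driver copy can serve both big- and little-endian register blocks. A rough sketch of that dispatch pattern over an in-memory register file follows; struct fake_temac and its helpers are invented, and htonl()/ntohl() stand in for the ioread32be()/iowrite32be() byte handling.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>    /* htonl/ntohl for the byte swaps */

struct fake_temac {
    uint8_t regs[16];     /* stands in for the ioremapped block */
    uint32_t (*ior)(struct fake_temac *lp, int off);
    void     (*iow)(struct fake_temac *lp, int off, uint32_t val);
};

/* Big-endian accessors: registers stored most-significant byte first. */
static uint32_t ior_be(struct fake_temac *lp, int off)
{
    uint32_t v;
    memcpy(&v, lp->regs + off, 4);
    return ntohl(v);
}
static void iow_be(struct fake_temac *lp, int off, uint32_t val)
{
    uint32_t v = htonl(val);
    memcpy(lp->regs + off, &v, 4);
}

/* Little-endian accessors: host order already matches on x86. */
static uint32_t ior_le(struct fake_temac *lp, int off)
{
    uint32_t v;
    memcpy(&v, lp->regs + off, 4);
    return v;
}
static void iow_le(struct fake_temac *lp, int off, uint32_t val)
{
    memcpy(lp->regs + off, &val, 4);
}

/* Wrapper macros in the spirit of the ones added to ll_temac.h */
#define temac_ior(lp, o)    ((lp)->ior(lp, o))
#define temac_iow(lp, o, v) ((lp)->iow(lp, o, v))

int main(void)
{
    struct fake_temac lp = { .regs = { 0 } };
    int little_endian = 0;    /* e.g. from a DT "little-endian" property */

    lp.ior = little_endian ? ior_le : ior_be;
    lp.iow = little_endian ? iow_le : iow_be;

    temac_iow(&lp, 4, 0x12345678);
    printf("reg@4 = 0x%08x\n", temac_ior(&lp, 4));
    return 0;
}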
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 44efffbe7970..997475c209c0 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -51,6 +52,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -61,14 +63,24 @@
* Low level register access functions
*/
-u32 temac_ior(struct temac_local *lp, int offset)
+static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
- return in_be32(lp->regs + offset);
+ return ioread32be(lp->regs + offset);
}
-void temac_iow(struct temac_local *lp, int offset, u32 value)
+static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
- out_be32(lp->regs + offset, value);
+ return iowrite32be(value, lp->regs + offset);
+}
+
+static u32 _temac_ior_le(struct temac_local *lp, int offset)
+{
+ return ioread32(lp->regs + offset);
+}
+
+static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
+{
+ return iowrite32(value, lp->regs + offset);
}
int temac_indirect_busywait(struct temac_local *lp)
@@ -80,7 +92,7 @@ int temac_indirect_busywait(struct temac_local *lp)
WARN_ON(1);
return -ETIMEDOUT;
}
- msleep(1);
+ usleep_range(500, 1000);
}
return 0;
}
@@ -119,23 +131,35 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
}
/**
- * temac_dma_in32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+ * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
+ * register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_in.
*/
-static u32 temac_dma_in32(struct temac_local *lp, int reg)
+static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
- return in_be32(lp->sdma_regs + (reg << 2));
+ return ioread32be(lp->sdma_regs + (reg << 2));
+}
+
+static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
+{
+ return ioread32(lp->sdma_regs + (reg << 2));
}
/**
- * temac_dma_out32 - Memory mapped DMA read, this function expects a
- * register input that is based on DCR word addresses which
- * are then converted to memory mapped byte addresses
+ * temac_dma_out32_* - Memory mapped DMA write, these functions expect
+ * a register input that is based on DCR word addresses which are then
+ * converted to memory mapped byte addresses. To be assigned to
+ * lp->dma_out.
*/
-static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
+static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
+{
+ iowrite32be(value, lp->sdma_regs + (reg << 2));
+}
+
+static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
- out_be32(lp->sdma_regs + (reg << 2), value);
+ iowrite32(value, lp->sdma_regs + (reg << 2));
}
/* DMA register access functions can be DCR based or memory mapped.
@@ -187,7 +211,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
/*
* temac_dcr_setup - This is a stub for when DCR is not supported,
- * such as with MicroBlaze
+ * such as with MicroBlaze and x86
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
struct device_node *np)
@@ -225,7 +249,6 @@ static void temac_dma_bd_release(struct net_device *ndev)
dma_free_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM,
lp->tx_bd_v, lp->tx_bd_p);
- kfree(lp->rx_skb);
}
/**
@@ -235,9 +258,11 @@ static int temac_dma_bd_init(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct sk_buff *skb;
+ dma_addr_t skb_dma_addr;
int i;
- lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
+ lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
+ GFP_KERNEL);
if (!lp->rx_skb)
goto out;
@@ -256,13 +281,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
goto out;
for (i = 0; i < TX_BD_NUM; i++) {
- lp->tx_bd_v[i].next = lp->tx_bd_p +
- sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
+ lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
+ + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
}
for (i = 0; i < RX_BD_NUM; i++) {
- lp->rx_bd_v[i].next = lp->rx_bd_p +
- sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
+ lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
skb = netdev_alloc_skb_ip_align(ndev,
XTE_MAX_JUMBO_FRAME_SIZE);
@@ -271,31 +296,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_skb[i] = skb;
/* returns physical address of skb->data */
- lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
- skb->data,
- XTE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
- lp->rx_bd_v[i].len = XTE_MAX_JUMBO_FRAME_SIZE;
- lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
+ lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+ lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
}
- lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
- CHNL_CTRL_IRQ_EN |
- CHNL_CTRL_IRQ_DLY_EN |
- CHNL_CTRL_IRQ_COAL_EN);
- /* 0x10220483 */
- /* 0x00100483 */
- lp->dma_out(lp, RX_CHNL_CTRL, 0xff070000 |
- CHNL_CTRL_IRQ_EN |
- CHNL_CTRL_IRQ_DLY_EN |
- CHNL_CTRL_IRQ_COAL_EN |
- CHNL_CTRL_IRQ_IOE);
- /* 0xff010283 */
-
- lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
- lp->dma_out(lp, RX_TAILDESC_PTR,
- lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
- lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+ /* Configure DMA channel (irq setup) */
+ lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+ 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+ CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
+ lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+ CHNL_CTRL_IRQ_IOE |
+ CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
+ CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
/* Init descriptor indexes */
lp->tx_bd_ci = 0;
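
The descriptor setup now stores every ring field in device (big-endian) order via cpu_to_be32() and links each descriptor to the next with a modulo wrap so the last one points back at the first. A hedged userspace sketch of that initialization follows; the struct layout, NUM_BD and the addresses are made up, and htonl() plays the role of cpu_to_be32() on a little-endian host.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define NUM_BD 4    /* tiny ring for illustration */

struct fake_bd {    /* loosely modelled on cdmac_bd */
    uint32_t next;  /* device-endian (big-endian) */
    uint32_t phys;
    uint32_t len;
    uint32_t app0;
};

int main(void)
{
    struct fake_bd ring[NUM_BD];
    uint32_t ring_phys = 0x10000000;   /* pretend DMA address of ring[0] */
    uint32_t buf_len = 0x2400;         /* pretend XTE_MAX_JUMBO_FRAME_SIZE */

    for (int i = 0; i < NUM_BD; i++) {
        /* Link to the following descriptor, wrapping at the end. */
        uint32_t next_phys = ring_phys +
                             sizeof(struct fake_bd) * ((i + 1) % NUM_BD);

        ring[i].next = htonl(next_phys);                /* cpu_to_be32() stand-in */
        ring[i].phys = htonl(0x20000000 + i * buf_len); /* pretend skb DMA address */
        ring[i].len  = htonl(buf_len);
        ring[i].app0 = htonl(1u << 0);                  /* e.g. IRQ-on-end flag */
    }

    /* The device reads big-endian; the CPU converts back when inspecting. */
    printf("bd[%d].next -> 0x%08x (wraps to ring base)\n",
           NUM_BD - 1, ntohl(ring[NUM_BD - 1].next));
    return 0;
}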
@@ -303,6 +320,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
+ /* Enable RX DMA transfers */
+ wmb();
+ lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
+ lp->dma_out(lp, RX_TAILDESC_PTR,
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+
+ /* Prepare for TX DMA transfer */
+ lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+
return 0;
out:
@@ -319,7 +345,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
struct temac_local *lp = netdev_priv(ndev);
/* set up unicast MAC address filter set its mac address */
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_UAW0_OFFSET,
(ndev->dev_addr[0]) |
(ndev->dev_addr[1] << 8) |
@@ -330,7 +356,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
temac_indirect_out32(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
}
static int temac_init_mac_address(struct net_device *ndev, const void *address)
@@ -359,7 +385,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
u32 multi_addr_msw, multi_addr_lsw, val;
int i;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
@@ -398,7 +424,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
}
static struct temac_option {
@@ -490,7 +516,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
struct temac_option *tp = &temac_options[0];
int reg;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
while (tp->opt) {
reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
if (options & tp->opt)
@@ -499,7 +525,7 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
tp++;
}
lp->options |= options;
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
return 0;
}
@@ -518,7 +544,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_dbg(&ndev->dev, "%s()\n", __func__);
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
/* Reset the receiver and wait for it to finish reset */
temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
timeout = 1000;
@@ -570,7 +596,7 @@ static void temac_device_reset(struct net_device *ndev)
temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
/* Sync default options with HW
* but leave receiver and transmitter disabled. */
@@ -598,7 +624,7 @@ static void temac_adjust_link(struct net_device *ndev)
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
if (lp->last_link != link_state) {
mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
@@ -614,23 +640,52 @@ static void temac_adjust_link(struct net_device *ndev)
lp->last_link = link_state;
phy_print_status(phy);
}
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
+}
+
+#ifdef CONFIG_64BIT
+
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+ bd->app3 = (u32)(((u64)p) >> 32);
+ bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
+}
+
+static void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+ return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}
+#else
+
+static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
+{
+ bd->app4 = (u32)p;
+}
+
+static void *ptr_from_txbd(struct cdmac_bd *bd)
+{
+ return (void *)(bd->app4);
+}
+
+#endif
+
static void temac_start_xmit_done(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
unsigned int stat = 0;
+ struct sk_buff *skb;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- stat = cur_p->app0;
+ stat = be32_to_cpu(cur_p->app0);
while (stat & STS_CTRL_APP0_CMPLT) {
- dma_unmap_single(ndev->dev.parent, cur_p->phys, cur_p->len,
- DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
+ skb = (struct sk_buff *)ptr_from_txbd(cur_p);
+ if (skb)
+ dev_consume_skb_irq(skb);
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
@@ -638,14 +693,14 @@ static void temac_start_xmit_done(struct net_device *ndev)
cur_p->app4 = 0;
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += cur_p->len;
+ ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= TX_BD_NUM)
lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
- stat = cur_p->app0;
+ stat = be32_to_cpu(cur_p->app0);
}
netif_wake_queue(ndev);
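
Because the descriptor app words are 32-bit while the skb pointer may be 64-bit on the newly supported platforms, the patch stashes the pointer across app3/app4 and reassembles it at completion time. A small standalone version of that split/reassemble is below, with a throwaway struct in place of cdmac_bd.

#include <stdint.h>
#include <stdio.h>

struct fake_bd {    /* only the fields used for the pointer stash */
    uint32_t app3;
    uint32_t app4;
};

static void ptr_to_bd(void *p, struct fake_bd *bd)
{
    uint64_t v = (uint64_t)(uintptr_t)p;

    bd->app3 = (uint32_t)(v >> 32);          /* high half */
    bd->app4 = (uint32_t)(v & 0xFFFFFFFFu);  /* low half */
}

static void *ptr_from_bd(const struct fake_bd *bd)
{
    uint64_t v = ((uint64_t)bd->app3 << 32) | bd->app4;

    return (void *)(uintptr_t)v;
}

int main(void)
{
    int dummy;    /* stands in for the skb */
    struct fake_bd bd;

    ptr_to_bd(&dummy, &bd);
    printf("round-trip ok: %d\n", ptr_from_bd(&bd) == (void *)&dummy);
    return 0;
}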
@@ -679,7 +734,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
struct cdmac_bd *cur_p;
- dma_addr_t start_p, tail_p;
+ dma_addr_t start_p, tail_p, skb_dma_addr;
int ii;
unsigned long num_frag;
skb_frag_t *frag;
@@ -689,7 +744,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (temac_check_tx_bd_space(lp, num_frag)) {
+ if (temac_check_tx_bd_space(lp, num_frag + 1)) {
if (!netif_queue_stopped(ndev))
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
@@ -700,16 +755,18 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned int csum_start_off = skb_checksum_start_offset(skb);
unsigned int csum_index_off = csum_start_off + skb->csum_offset;
- cur_p->app0 |= 1; /* TX Checksum Enabled */
- cur_p->app1 = (csum_start_off << 16) | csum_index_off;
+ cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
+ cur_p->app1 = cpu_to_be32((csum_start_off << 16)
+ | csum_index_off);
cur_p->app2 = 0; /* initial checksum seed */
}
- cur_p->app0 |= STS_CTRL_APP0_SOP;
- cur_p->len = skb_headlen(skb);
- cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- cur_p->app4 = (unsigned long)skb;
+ cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ cur_p->len = cpu_to_be32(skb_headlen(skb));
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
+ ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
lp->tx_bd_tail++;
@@ -717,14 +774,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- cur_p->phys = dma_map_single(ndev->dev.parent,
- skb_frag_address(frag),
- skb_frag_size(frag), DMA_TO_DEVICE);
- cur_p->len = skb_frag_size(frag);
+ skb_dma_addr = dma_map_single(ndev->dev.parent,
+ skb_frag_address(frag),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
+ cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
frag++;
}
- cur_p->app0 |= STS_CTRL_APP0_EOP;
+ cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
lp->tx_bd_tail++;
@@ -734,6 +793,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_tx_timestamp(skb);
/* Kick off the transfer */
+ wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
return NETDEV_TX_OK;
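
For TX checksum offload, the start-of-checksum and insert offsets are packed as two 16-bit halves of one app word, now kept in device (big-endian) order. A hedged sketch of that packing and unpacking follows; the function names are invented, and htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu().

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Pack checksum-start and checksum-insert offsets as in cur_p->app1 above. */
static uint32_t pack_csum_offsets(uint16_t start_off, uint16_t insert_off)
{
    return htonl(((uint32_t)start_off << 16) | insert_off);  /* cpu_to_be32 */
}

static void unpack_csum_offsets(uint32_t app1_be,
                                uint16_t *start_off, uint16_t *insert_off)
{
    uint32_t v = ntohl(app1_be);                             /* be32_to_cpu */

    *start_off  = (uint16_t)(v >> 16);
    *insert_off = (uint16_t)(v & 0xFFFF);
}

int main(void)
{
    /* e.g. TCP over IPv4: checksumming starts at the TCP header (offset 34)
     * and the result is inserted 16 bytes into it (offset 50). */
    uint32_t app1 = pack_csum_offsets(34, 34 + 16);
    uint16_t start, insert;

    unpack_csum_offsets(app1, &start, &insert);
    printf("start=%u insert=%u\n", start, insert);
    return 0;
}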
@@ -746,7 +806,7 @@ static void ll_temac_recv(struct net_device *ndev)
struct sk_buff *skb, *new_skb;
unsigned int bdstat;
struct cdmac_bd *cur_p;
- dma_addr_t tail_p;
+ dma_addr_t tail_p, skb_dma_addr;
int length;
unsigned long flags;
@@ -755,14 +815,14 @@ static void ll_temac_recv(struct net_device *ndev)
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- bdstat = cur_p->app0;
+ bdstat = be32_to_cpu(cur_p->app0);
while ((bdstat & STS_CTRL_APP0_CMPLT)) {
skb = lp->rx_skb[lp->rx_bd_ci];
- length = cur_p->app4 & 0x3FFF;
+ length = be32_to_cpu(cur_p->app4) & 0x3FFF;
- dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
- DMA_FROM_DEVICE);
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
@@ -773,7 +833,12 @@ static void ll_temac_recv(struct net_device *ndev)
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) {
- skb->csum = cur_p->app3 & 0xFFFF;
+ /* Convert from device endianness (be32) to CPU
+ * endianness, and if necessary swap the bytes
+ * (back) for proper IP checksum byte order
+ * (be16).
+ */
+ skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
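
The new comment in the receive path explains the double conversion: the descriptor word arrives in device order (be32), while skb->csum for CHECKSUM_COMPLETE must carry the 16-bit sum in network byte order (be16). Below is a userspace illustration of the same two steps, with ntohl() standing in for be32_to_cpu() on the real descriptor.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
    /* Pretend app3 as read from the big-endian descriptor: the low 16 bits
     * carry the hardware-computed checksum 0xABCD. */
    uint32_t app3_be = htonl(0x0000ABCD);

    /* Step 1: device (be32) -> CPU order, mask to the 16-bit checksum. */
    uint16_t csum_cpu = (uint16_t)(ntohl(app3_be) & 0xFFFF);

    /* Step 2: CPU -> network order, matching what the stack expects for
     * CHECKSUM_COMPLETE. On big-endian hosts both steps are no-ops. */
    uint16_t csum_net = htons(csum_cpu);

    printf("cpu order 0x%04x, value stored for the stack 0x%04x\n",
           csum_cpu, csum_net);
    return 0;
}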
@@ -790,11 +855,12 @@ static void ll_temac_recv(struct net_device *ndev)
return;
}
- cur_p->app0 = STS_CTRL_APP0_IRQONEND;
- cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
- XTE_MAX_JUMBO_FRAME_SIZE,
- DMA_FROM_DEVICE);
- cur_p->len = XTE_MAX_JUMBO_FRAME_SIZE;
+ cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+ skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ cur_p->phys = cpu_to_be32(skb_dma_addr);
+ cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
lp->rx_skb[lp->rx_bd_ci] = new_skb;
lp->rx_bd_ci++;
@@ -802,7 +868,7 @@ static void ll_temac_recv(struct net_device *ndev)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- bdstat = cur_p->app0;
+ bdstat = be32_to_cpu(cur_p->app0);
}
lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
@@ -820,8 +886,10 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
if (status & (IRQ_COAL | IRQ_DLY))
temac_start_xmit_done(lp->ndev);
- if (status & 0x080)
- dev_err(&ndev->dev, "DMA error 0x%x\n", status);
+ if (status & (IRQ_ERR | IRQ_DMAERR))
+ dev_err_ratelimited(&ndev->dev,
+ "TX error 0x%x TX_CHNL_STS=0x%08x\n",
+ status, lp->dma_in(lp, TX_CHNL_STS));
return IRQ_HANDLED;
}
@@ -838,6 +906,10 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
if (status & (IRQ_COAL | IRQ_DLY))
ll_temac_recv(lp->ndev);
+ if (status & (IRQ_ERR | IRQ_DMAERR))
+ dev_err_ratelimited(&ndev->dev,
+ "RX error 0x%x RX_CHNL_STS=0x%08x\n",
+ status, lp->dma_in(lp, RX_CHNL_STS));
return IRQ_HANDLED;
}
@@ -857,7 +929,14 @@ static int temac_open(struct net_device *ndev)
dev_err(lp->dev, "of_phy_connect() failed\n");
return -ENODEV;
}
-
+ phy_start(phydev);
+ } else if (strlen(lp->phy_name) > 0) {
+ phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
+ lp->phy_interface);
+ if (IS_ERR(phydev)) {
+ dev_err(lp->dev, "phy_connect() failed\n");
+ return PTR_ERR(phydev);
+ }
phy_start(phydev);
}
@@ -977,22 +1056,25 @@ static const struct ethtool_ops temac_ethtool_ops = {
.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
-static int temac_of_probe(struct platform_device *op)
+static int temac_probe(struct platform_device *pdev)
{
- struct device_node *np;
+ struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
struct temac_local *lp;
struct net_device *ndev;
+ struct resource *res;
const void *addr;
__be32 *p;
+ bool little_endian;
int rc = 0;
/* Init network device structure */
- ndev = alloc_etherdev(sizeof(*lp));
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
if (!ndev)
return -ENOMEM;
- platform_set_drvdata(op, ndev);
- SET_NETDEV_DEV(ndev, &op->dev);
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
@@ -1014,89 +1096,196 @@ static int temac_of_probe(struct platform_device *op)
/* setup temac private info structure */
lp = netdev_priv(ndev);
lp->ndev = ndev;
- lp->dev = &op->dev;
+ lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
spin_lock_init(&lp->rx_lock);
- mutex_init(&lp->indirect_mutex);
+
+ /* Setup mutex for synchronization of indirect register access */
+ if (pdata) {
+ if (!pdata->indirect_mutex) {
+ dev_err(&pdev->dev,
+ "indirect_mutex missing in platform_data\n");
+ return -EINVAL;
+ }
+ lp->indirect_mutex = pdata->indirect_mutex;
+ } else {
+ lp->indirect_mutex = devm_kmalloc(&pdev->dev,
+ sizeof(*lp->indirect_mutex),
+ GFP_KERNEL);
+ mutex_init(lp->indirect_mutex);
+ }
/* map device registers */
- lp->regs = of_iomap(op->dev.of_node, 0);
- if (!lp->regs) {
- dev_err(&op->dev, "could not map temac regs.\n");
- rc = -ENOMEM;
- goto nodev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (IS_ERR(lp->regs)) {
+ dev_err(&pdev->dev, "could not map TEMAC registers\n");
+ return PTR_ERR(lp->regs);
+ }
+
+ /* Select register access functions with the specified
+ * endianness mode. Default for OF devices is big-endian.
+ */
+ little_endian = false;
+ if (temac_np) {
+ if (of_get_property(temac_np, "little-endian", NULL))
+ little_endian = true;
+ } else if (pdata) {
+ little_endian = pdata->reg_little_endian;
+ }
+ if (little_endian) {
+ lp->temac_ior = _temac_ior_le;
+ lp->temac_iow = _temac_iow_le;
+ } else {
+ lp->temac_ior = _temac_ior_be;
+ lp->temac_iow = _temac_iow_be;
}
/* Setup checksum offload, but default to off if not specified */
lp->temac_features = 0;
- p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,txcsum", NULL);
- if (p && be32_to_cpu(*p)) {
- lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ if (temac_np) {
+ p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
+ if (p && be32_to_cpu(*p))
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+ } else if (pdata) {
+ if (pdata->txcsum)
+ lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
+ if (pdata->rxcsum)
+ lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
+ }
+ if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
/* Can checksum TCP/UDP over IPv4. */
ndev->features |= NETIF_F_IP_CSUM;
- }
- p = (__be32 *)of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL);
- if (p && be32_to_cpu(*p))
- lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
-
- /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
- np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
- if (!np) {
- dev_err(&op->dev, "could not find DMA node\n");
- rc = -ENODEV;
- goto err_iounmap;
- }
- /* Setup the DMA register accesses, could be DCR or memory mapped */
- if (temac_dcr_setup(lp, op, np)) {
+ /* Setup LocalLink DMA */
+ if (temac_np) {
+ /* Find the DMA node, map the DMA registers, and
+ * decode the DMA IRQs.
+ */
+ dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
+ if (!dma_np) {
+ dev_err(&pdev->dev, "could not find DMA node\n");
+ return -ENODEV;
+ }
- /* no DCR in the device tree, try non-DCR */
- lp->sdma_regs = of_iomap(np, 0);
- if (lp->sdma_regs) {
- lp->dma_in = temac_dma_in32;
- lp->dma_out = temac_dma_out32;
- dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
- } else {
- dev_err(&op->dev, "unable to map DMA registers\n");
- of_node_put(np);
- goto err_iounmap;
+ /* Setup the DMA register accesses, could be DCR or
+ * memory mapped.
+ */
+ if (temac_dcr_setup(lp, pdev, dma_np)) {
+ /* no DCR in the device tree, try non-DCR */
+ lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
+ NULL);
+ if (IS_ERR(lp->sdma_regs)) {
+ dev_err(&pdev->dev,
+ "unable to map DMA registers\n");
+ of_node_put(dma_np);
+ return PTR_ERR(lp->sdma_regs);
+ }
+ if (of_get_property(dma_np, "little-endian", NULL)) {
+ lp->dma_in = temac_dma_in32_le;
+ lp->dma_out = temac_dma_out32_le;
+ } else {
+ lp->dma_in = temac_dma_in32_be;
+ lp->dma_out = temac_dma_out32_be;
+ }
+ dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
}
- }
- lp->rx_irq = irq_of_parse_and_map(np, 0);
- lp->tx_irq = irq_of_parse_and_map(np, 1);
+ /* Get DMA RX and TX interrupts */
+ lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
+ lp->tx_irq = irq_of_parse_and_map(dma_np, 1);
- of_node_put(np); /* Finished with the DMA node; drop the reference */
+ /* Use defaults for IRQ delay/coalescing setup. These
+ * are configuration values, so they do not belong in
+ * the device tree.
+ */
+ lp->tx_chnl_ctrl = 0x10220000;
+ lp->rx_chnl_ctrl = 0xff070000;
+
+ /* Finished with the DMA node; drop the reference */
+ of_node_put(dma_np);
+ } else if (pdata) {
+ /* 2nd memory resource specifies DMA registers */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ lp->sdma_regs = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (IS_ERR(lp->sdma_regs)) {
+ dev_err(&pdev->dev,
+ "could not map DMA registers\n");
+ return PTR_ERR(lp->sdma_regs);
+ }
+ if (pdata->dma_little_endian) {
+ lp->dma_in = temac_dma_in32_le;
+ lp->dma_out = temac_dma_out32_le;
+ } else {
+ lp->dma_in = temac_dma_in32_be;
+ lp->dma_out = temac_dma_out32_be;
+ }
- if (!lp->rx_irq || !lp->tx_irq) {
- dev_err(&op->dev, "could not determine irqs\n");
- rc = -ENOMEM;
- goto err_iounmap_2;
+ /* Get DMA RX and TX interrupts */
+ lp->rx_irq = platform_get_irq(pdev, 0);
+ lp->tx_irq = platform_get_irq(pdev, 1);
+
+ /* IRQ delay/coalescing setup */
+ if (pdata->tx_irq_timeout || pdata->tx_irq_count)
+ lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
+ (pdata->tx_irq_count << 16);
+ else
+ lp->tx_chnl_ctrl = 0x10220000;
+ if (pdata->rx_irq_timeout || pdata->rx_irq_count)
+ lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
+ (pdata->rx_irq_count << 16);
+ else
+ lp->rx_chnl_ctrl = 0xff070000;
}
+ /* Error handle returned DMA RX and TX interrupts */
+ if (lp->rx_irq < 0) {
+ if (lp->rx_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "could not get DMA RX irq\n");
+ return lp->rx_irq;
+ }
+ if (lp->tx_irq < 0) {
+ if (lp->tx_irq != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "could not get DMA TX irq\n");
+ return lp->tx_irq;
+ }
- /* Retrieve the MAC address */
- addr = of_get_mac_address(op->dev.of_node);
- if (!addr) {
- dev_err(&op->dev, "could not find MAC address\n");
- rc = -ENODEV;
- goto err_iounmap_2;
+ if (temac_np) {
+ /* Retrieve the MAC address */
+ addr = of_get_mac_address(temac_np);
+ if (IS_ERR(addr)) {
+ dev_err(&pdev->dev, "could not find MAC address\n");
+ return -ENODEV;
+ }
+ temac_init_mac_address(ndev, addr);
+ } else if (pdata) {
+ temac_init_mac_address(ndev, pdata->mac_addr);
}
- temac_init_mac_address(ndev, addr);
- rc = temac_mdio_setup(lp, op->dev.of_node);
+ rc = temac_mdio_setup(lp, pdev);
if (rc)
- dev_warn(&op->dev, "error registering MDIO bus\n");
-
- lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0);
- if (lp->phy_node)
- dev_dbg(lp->dev, "using PHY node %pOF (%p)\n", np, np);
+ dev_warn(&pdev->dev, "error registering MDIO bus\n");
+
+ if (temac_np) {
+ lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
+ if (lp->phy_node)
+ dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
+ } else if (pdata) {
+ snprintf(lp->phy_name, sizeof(lp->phy_name),
+ PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
+ lp->phy_interface = pdata->phy_interface;
+ }
/* Add the device attributes */
rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
if (rc) {
dev_err(lp->dev, "Error creating sysfs files\n");
- goto err_iounmap_2;
+ goto err_sysfs_create;
}
rc = register_netdev(lp->ndev);
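
When probing from platform data, the DMA channel control words are built by shifting the IRQ timeout into bits 31:24 and the IRQ count into bits 23:16, with the same defaults as the device-tree path when nothing is specified. A minimal sketch of that packing follows; the helper name and sample values are invented, and only the shift layout and defaults come from the hunk above.

#include <stdint.h>
#include <stdio.h>

#define TX_CHNL_CTRL_DEFAULT 0x10220000u    /* defaults from the probe path */
#define RX_CHNL_CTRL_DEFAULT 0xff070000u

/* Pack irq_timeout (bits 31:24) and irq_count (bits 23:16), or fall back. */
static uint32_t chnl_ctrl(uint8_t irq_timeout, uint8_t irq_count,
                          uint32_t fallback)
{
    if (irq_timeout || irq_count)
        return ((uint32_t)irq_timeout << 24) | ((uint32_t)irq_count << 16);
    return fallback;
}

int main(void)
{
    /* e.g. platform data asking for a timeout of 0x20 and a count of 4 */
    printf("tx_chnl_ctrl = 0x%08x\n", chnl_ctrl(0x20, 4, TX_CHNL_CTRL_DEFAULT));
    /* no values given: keep the driver default */
    printf("rx_chnl_ctrl = 0x%08x\n", chnl_ctrl(0, 0, RX_CHNL_CTRL_DEFAULT));
    return 0;
}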
@@ -1107,33 +1296,25 @@ static int temac_of_probe(struct platform_device *op)
return 0;
- err_register_ndev:
+err_register_ndev:
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
- err_iounmap_2:
- if (lp->sdma_regs)
- iounmap(lp->sdma_regs);
- err_iounmap:
- iounmap(lp->regs);
- nodev:
- free_netdev(ndev);
- ndev = NULL;
+err_sysfs_create:
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ temac_mdio_teardown(lp);
return rc;
}
-static int temac_of_remove(struct platform_device *op)
+static int temac_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(op);
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct temac_local *lp = netdev_priv(ndev);
- temac_mdio_teardown(lp);
unregister_netdev(ndev);
sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
- of_node_put(lp->phy_node);
- lp->phy_node = NULL;
- iounmap(lp->regs);
- if (lp->sdma_regs)
- iounmap(lp->sdma_regs);
- free_netdev(ndev);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ temac_mdio_teardown(lp);
return 0;
}
@@ -1146,16 +1327,16 @@ static const struct of_device_id temac_of_match[] = {
};
MODULE_DEVICE_TABLE(of, temac_of_match);
-static struct platform_driver temac_of_driver = {
- .probe = temac_of_probe,
- .remove = temac_of_remove,
+static struct platform_driver temac_driver = {
+ .probe = temac_probe,
+ .remove = temac_remove,
.driver = {
.name = "xilinx_temac",
.of_match_table = temac_of_match,
},
};
-module_platform_driver(temac_of_driver);
+module_platform_driver(temac_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index f5e83ac6f7e2..a4667326f745 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/of_mdio.h>
+#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -28,10 +29,10 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
* in the LSW0 register */
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
@@ -49,25 +50,34 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
/* First write the desired value into the write data register
* and then write the address into the access initiator register
*/
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
return 0;
}
-int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
+int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
{
+ struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device_node *np = dev_of_node(&pdev->dev);
struct mii_bus *bus;
u32 bus_hz;
int clk_div;
int rc;
struct resource res;
+ /* Get MDIO bus frequency (if specified) */
+ bus_hz = 0;
+ if (np)
+ of_property_read_u32(np, "clock-frequency", &bus_hz);
+ else if (pdata)
+ bus_hz = pdata->mdio_clk_freq;
+
/* Calculate a reasonable divisor for the clock rate */
clk_div = 0x3f; /* worst-case default setting */
- if (of_property_read_u32(np, "clock-frequency", &bus_hz) == 0) {
+ if (bus_hz != 0) {
clk_div = bus_hz / (2500 * 1000 * 2) - 1;
if (clk_div < 1)
clk_div = 1;
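
The divisor selection above targets the usual 2.5 MHz MDIO ceiling: taking MDC as roughly bus_hz / (2 * (clk_div + 1)), clk_div = bus_hz / (2500 * 1000 * 2) - 1 is the smallest divisor that keeps MDC at or under 2.5 MHz, clamped to at least 1, with 0x3f as the worst-case default when the bus frequency is unknown. A quick standalone check of that arithmetic (the MDC formula is inferred from the calculation, not quoted from the driver):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the divisor selection in temac_mdio_setup() above. */
static int mdio_clk_div(uint32_t bus_hz)
{
    int clk_div = 0x3f;    /* worst-case default */

    if (bus_hz != 0) {
        clk_div = bus_hz / (2500 * 1000 * 2) - 1;
        if (clk_div < 1)
            clk_div = 1;
    }
    return clk_div;
}

int main(void)
{
    uint32_t bus_hz = 100000000;    /* e.g. a 100 MHz host clock */
    int clk_div = mdio_clk_div(bus_hz);

    /* Implied MDC frequency: bus_hz / (2 * (clk_div + 1)) */
    printf("clk_div=%d -> MDC ~%u Hz (target <= 2.5 MHz)\n",
           clk_div, bus_hz / (2 * (clk_div + 1)));
    return 0;
}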
@@ -77,17 +87,23 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
/* Enable the MDIO bus by asserting the enable bit and writing
* in the clock config */
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
- bus = mdiobus_alloc();
+ bus = devm_mdiobus_alloc(&pdev->dev);
if (!bus)
return -ENOMEM;
- of_address_to_resource(np, 0, &res);
- snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long)res.start);
+ if (np) {
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ } else if (pdata) {
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ pdata->mdio_bus_id);
+ }
+
bus->priv = lp;
bus->name = "Xilinx TEMAC MDIO";
bus->read = temac_mdio_read;
@@ -98,23 +114,16 @@ int temac_mdio_setup(struct temac_local *lp, struct device_node *np)
rc = of_mdiobus_register(bus, np);
if (rc)
- goto err_register;
+ return rc;
- mutex_lock(&lp->indirect_mutex);
+ mutex_lock(lp->indirect_mutex);
dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
temac_indirect_in32(lp, XTE_MC_OFFSET));
- mutex_unlock(&lp->indirect_mutex);
+ mutex_unlock(lp->indirect_mutex);
return 0;
-
- err_register:
- mdiobus_free(bus);
- return rc;
}
void temac_mdio_teardown(struct temac_local *lp)
{
mdiobus_unregister(lp->mii_bus);
- mdiobus_free(lp->mii_bus);
- lp->mii_bus = NULL;
}
-
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4041c75997ba..108fbc7f125a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1596,7 +1596,7 @@ static int axienet_probe(struct platform_device *pdev)
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
- if (!mac_addr) {
+ if (IS_ERR(mac_addr)) {
dev_err(&pdev->dev, "could not find MAC address\n");
goto free_netdev;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b03a417d0073..691170753563 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -17,6 +17,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
@@ -1078,6 +1079,27 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
return (bool)*p;
}
+/**
+ * xemaclite_ethtools_get_drvinfo - Get various Axi Emac Lite driver info
+ * @ndev: Pointer to net_device structure
+ * @ed: Pointer to ethtool_drvinfo structure
+ *
+ * This implements ethtool command for getting the driver information.
+ * Issue "ethtool -i ethX" from the Linux prompt to execute this function.
+ */
+static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *ed)
+{
+ strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+}
+
+static const struct ethtool_ops xemaclite_ethtool_ops = {
+ .get_drvinfo = xemaclite_ethtools_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
static const struct net_device_ops xemaclite_netdev_ops;
/**
@@ -1143,7 +1165,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (mac_address) {
+ if (!IS_ERR(mac_address)) {
/* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
} else {
@@ -1164,6 +1186,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
+ ndev->ethtool_ops = &xemaclite_ethtool_ops;
ndev->flags &= ~IFF_MULTICAST;
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1229,12 +1252,29 @@ xemaclite_poll_controller(struct net_device *ndev)
}
#endif
+/* Ioctl MII Interface */
+static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ if (!dev->phydev || !netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
.ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
+ .ndo_do_ioctl = xemaclite_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = xemaclite_poll_controller,
#endif
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5583d993480d..98d1a45c0606 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
+#include <net/ipv6_stubs.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
@@ -22,8 +23,6 @@
#define GENEVE_NETDEV_VER "0.6"
-#define GENEVE_UDP_PORT 6081
-
#define GENEVE_N_VID (1u << 24)
#define GENEVE_VID_MASK (GENEVE_N_VID - 1)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7a145172d503..eaf4311b4004 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1270,21 +1270,21 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
static const struct genl_ops gtp_genl_ops[] = {
{
.cmd = GTP_CMD_NEWPDP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_new_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_DELPDP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_del_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_GETPDP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = gtp_genl_get_pdp,
.dumpit = gtp_genl_dump_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -1294,6 +1294,7 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.version = 0,
.hdrsize = 0,
.maxattr = GTPA_MAX,
+ .policy = gtp_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = gtp_genl_ops,
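
The gtp changes follow the series-wide pattern of dropping the per-operation .policy pointer and attaching a single policy to the family, so attribute validation is applied in one central place. Below is a rough plain-C sketch of the shape of that refactor; every struct and function is invented for illustration and none of it is the real genetlink API.

#include <stdio.h>

struct policy { const char *name; };    /* stand-in for an nla_policy[] */

struct op {
    int cmd;
    int (*doit)(void);
    /* the old layout also carried: const struct policy *policy; */
};

struct family {
    const char *name;
    const struct policy *policy;        /* now held once, per family */
    const struct op *ops;
    int n_ops;
};

static int do_new(void) { puts("NEWPDP handled"); return 0; }
static int do_del(void) { puts("DELPDP handled"); return 0; }

static const struct policy gtp_policy = { "gtp attribute policy" };
static const struct op gtp_ops[] = {
    { .cmd = 1, .doit = do_new },
    { .cmd = 2, .doit = do_del },
};
static const struct family gtp_family = {
    .name = "gtp", .policy = &gtp_policy,
    .ops = gtp_ops, .n_ops = 2,
};

/* Dispatcher: validate once against the family policy, then call the op. */
static int dispatch(const struct family *f, int cmd)
{
    printf("validating with %s\n", f->policy->name);
    for (int i = 0; i < f->n_ops; i++)
        if (f->ops[i].cmd == cmd)
            return f->ops[i].doit();
    return -1;
}

int main(void)
{
    return dispatch(&gtp_family, 1);
}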
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 029206e4da3b..0f7025f3a384 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1298,11 +1298,11 @@ static void rr_dump(struct net_device *dev)
if (rrpriv->tx_skbuff[cons]){
len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
- printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
+ printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
rrpriv->tx_ring[cons].mode,
rrpriv->tx_ring[cons].size,
(unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
- (unsigned long)rrpriv->tx_skbuff[cons]->data,
+ rrpriv->tx_skbuff[cons]->data,
(unsigned int)rrpriv->tx_skbuff[cons]->truesize);
for (i = 0; i < len; i++){
if (!(i & 7))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index e0dce373cdd9..ee198606854d 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -875,12 +875,6 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
- if (atomic_read(&nvchan->queue_sends) < 1 &&
- !net_device->tx_disable) {
- netif_tx_wake_queue(txq);
- ndev_ctx->eth_stats.wake_queue++;
- ret = -ENOSPC;
- }
} else {
netdev_err(ndev,
"Unable to send packet pages %u len %u, ret %d\n",
@@ -888,6 +882,15 @@ static inline int netvsc_send_pkt(
ret);
}
+ if (netif_tx_queue_stopped(txq) &&
+ atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
+ netif_tx_wake_queue(txq);
+ ndev_ctx->eth_stats.wake_queue++;
+ if (ret == -EAGAIN)
+ ret = -ENOSPC;
+ }
+
return ret;
}
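
The netvsc change moves the "wake the queue once in-flight sends have drained" check out of the -EAGAIN branch, so it runs after any send attempt that may have left the queue stopped. A small state sketch of that pattern follows; the struct, field names and thresholds are invented, and only the control flow mirrors the hunk.

#include <stdio.h>
#include <stdbool.h>

struct fake_queue {
    bool stopped;
    int queue_sends;     /* sends still owned by the host/device */
    bool tx_disabled;
};

/* After a send attempt: if the queue was stopped but has since drained,
 * wake it again regardless of which path stopped it. */
static void maybe_wake(struct fake_queue *q)
{
    if (q->stopped && q->queue_sends < 1 && !q->tx_disabled) {
        q->stopped = false;
        puts("queue woken");
    }
}

int main(void)
{
    struct fake_queue q = { .stopped = true, .queue_sends = 0 };

    maybe_wake(&q);          /* drained: wakes */
    q.stopped = true;
    q.queue_sends = 3;
    maybe_wake(&q);          /* still busy: stays stopped */
    printf("stopped=%d\n", q.stopped);
    return 0;
}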
@@ -966,7 +969,7 @@ int netvsc_send(struct net_device *ndev,
/* Keep aggregating only if stack says more data is coming
* and not doing mixed modes send and not flow blocked
*/
- xmit_more = skb->xmit_more &&
+ xmit_more = netdev_xmit_more() &&
!packet->cp_partial &&
!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b20fb0fb595b..06393b215102 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -328,7 +328,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
* If a valid queue has already been assigned, then use that.
* Otherwise compute tx queue based on hash and the send table.
*
- * This is basically similar to default (__netdev_pick_tx) with the added step
+ * This is basically similar to default (netdev_pick_tx) with the added step
* of using the host send_table when no other queue has been assigned.
*
* TODO support XPS - but get_xps_queue not exported
@@ -351,8 +351,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev;
@@ -364,10 +363,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
- txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- sb_dev, fallback);
+ txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
else
- txq = fallback(vf_netdev, skb, NULL);
+ txq = netdev_pick_tx(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 3b88846de31b..b187ae1a6bd6 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -227,14 +227,16 @@ static int append_radio_msg(struct sk_buff *skb, struct hwsim_phy *phy)
return 0;
}
- nl_edges = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGES);
+ nl_edges = nla_nest_start_noflag(skb,
+ MAC802154_HWSIM_ATTR_RADIO_EDGES);
if (!nl_edges) {
rcu_read_unlock();
return -ENOBUFS;
}
list_for_each_entry_rcu(e, &phy->edges, list) {
- nl_edge = nla_nest_start(skb, MAC802154_HWSIM_ATTR_RADIO_EDGE);
+ nl_edge = nla_nest_start_noflag(skb,
+ MAC802154_HWSIM_ATTR_RADIO_EDGE);
if (!nl_edge) {
rcu_read_unlock();
nla_nest_cancel(skb, nl_edges);
@@ -428,9 +430,7 @@ static int hwsim_new_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
- info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
- hwsim_edge_policy, NULL))
+ if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
@@ -492,9 +492,7 @@ static int hwsim_del_edge_nl(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
- info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
- hwsim_edge_policy, NULL))
+ if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID])
@@ -542,9 +540,7 @@ static int hwsim_set_edge_lqi(struct sk_buff *msg, struct genl_info *info)
!info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
return -EINVAL;
- if (nla_parse_nested(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX,
- info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE],
- hwsim_edge_policy, NULL))
+ if (nla_parse_nested_deprecated(edge_attrs, MAC802154_HWSIM_EDGE_ATTR_MAX, info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE], hwsim_edge_policy, NULL))
return -EINVAL;
if (!edge_attrs[MAC802154_HWSIM_EDGE_ATTR_ENDPOINT_ID] &&
@@ -598,37 +594,37 @@ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] =
static const struct genl_ops hwsim_nl_ops[] = {
{
.cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_new_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_DEL_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_del_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_GET_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_get_radio_nl,
.dumpit = hwsim_dump_radio_nl,
},
{
.cmd = MAC802154_HWSIM_CMD_NEW_EDGE,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_new_edge_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_DEL_EDGE,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_del_edge_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_SET_EDGE,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_set_edge_lqi,
.flags = GENL_UNS_ADMIN_PERM,
},
@@ -638,6 +634,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.name = "MAC802154_HWSIM",
.version = 1,
.maxattr = MAC802154_HWSIM_ATTR_MAX,
+ .policy = hwsim_genl_policy,
.module = THIS_MODULE,
.ops = hwsim_nl_ops,
.n_ops = ARRAY_SIZE(hwsim_nl_ops),
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 2df7f60fe052..857e4bf99883 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -128,21 +128,9 @@ static u32 always_on(struct net_device *dev)
return 1;
}
-static int loopback_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
-{
- ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
-
- ts_info->phc_index = -1;
-
- return 0;
-};
-
static const struct ethtool_ops loopback_ethtool_ops = {
.get_link = always_on,
- .get_ts_info = loopback_get_ts_info,
+ .get_ts_info = ethtool_op_get_ts_info,
};
static int loopback_dev_init(struct net_device *dev)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 64a982563d59..009b2902c9d3 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1611,9 +1611,7 @@ static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
if (!attrs[MACSEC_ATTR_SA_CONFIG])
return -EINVAL;
- if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX,
- attrs[MACSEC_ATTR_SA_CONFIG],
- macsec_genl_sa_policy, NULL))
+ if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
return -EINVAL;
return 0;
@@ -1624,9 +1622,7 @@ static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
return -EINVAL;
- if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX,
- attrs[MACSEC_ATTR_RXSC_CONFIG],
- macsec_genl_rxsc_policy, NULL))
+ if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
return -EINVAL;
return 0;
@@ -2175,8 +2171,9 @@ static int copy_tx_sa_stats(struct sk_buff *skb,
return 0;
}
-static int copy_rx_sa_stats(struct sk_buff *skb,
- struct macsec_rx_sa_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sa_stats(struct sk_buff *skb,
+ struct macsec_rx_sa_stats __percpu *pstats)
{
struct macsec_rx_sa_stats sum = {0, };
int cpu;
@@ -2201,8 +2198,8 @@ static int copy_rx_sa_stats(struct sk_buff *skb,
return 0;
}
-static int copy_rx_sc_stats(struct sk_buff *skb,
- struct pcpu_rx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
struct macsec_rx_sc_stats sum = {0, };
int cpu;
@@ -2265,8 +2262,8 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
return 0;
}
-static int copy_tx_sc_stats(struct sk_buff *skb,
- struct pcpu_tx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
struct macsec_tx_sc_stats sum = {0, };
int cpu;
@@ -2305,8 +2302,8 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
return 0;
}
-static int copy_secy_stats(struct sk_buff *skb,
- struct pcpu_secy_stats __percpu *pstats)
+static noinline_for_stack int
+copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
struct macsec_dev_stats sum = {0, };
int cpu;
@@ -2364,7 +2361,8 @@ static int copy_secy_stats(struct sk_buff *skb,
static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
{
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY);
+ struct nlattr *secy_nest = nla_nest_start_noflag(skb,
+ MACSEC_ATTR_SECY);
u64 csid;
if (!secy_nest)
@@ -2410,8 +2408,9 @@ cancel:
return 1;
}
-static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
- struct sk_buff *skb, struct netlink_callback *cb)
+static noinline_for_stack int
+dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ struct sk_buff *skb, struct netlink_callback *cb)
{
struct macsec_rx_sc *rx_sc;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
@@ -2433,7 +2432,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (nla_put_secy(secy, skb))
goto nla_put_failure;
- attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
if (!attr)
goto nla_put_failure;
if (copy_tx_sc_stats(skb, tx_sc->stats)) {
@@ -2442,7 +2441,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
- attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
if (!attr)
goto nla_put_failure;
if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
@@ -2451,7 +2450,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
- txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST);
+ txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
if (!txsa_list)
goto nla_put_failure;
for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
@@ -2461,7 +2460,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!tx_sa)
continue;
- txsa_nest = nla_nest_start(skb, j++);
+ txsa_nest = nla_nest_start_noflag(skb, j++);
if (!txsa_nest) {
nla_nest_cancel(skb, txsa_list);
goto nla_put_failure;
@@ -2476,7 +2475,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, txsa_nest);
nla_nest_cancel(skb, txsa_list);
@@ -2494,7 +2493,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, txsa_list);
- rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST);
+ rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
if (!rxsc_list)
goto nla_put_failure;
@@ -2502,7 +2501,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
for_each_rxsc_rtnl(secy, rx_sc) {
int k;
struct nlattr *rxsa_list;
- struct nlattr *rxsc_nest = nla_nest_start(skb, j++);
+ struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
if (!rxsc_nest) {
nla_nest_cancel(skb, rxsc_list);
@@ -2517,7 +2516,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS);
+ attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2531,7 +2530,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
nla_nest_end(skb, attr);
- rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST);
+ rxsa_list = nla_nest_start_noflag(skb,
+ MACSEC_RXSC_ATTR_SA_LIST);
if (!rxsa_list) {
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
@@ -2545,7 +2545,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!rx_sa)
continue;
- rxsa_nest = nla_nest_start(skb, k++);
+ rxsa_nest = nla_nest_start_noflag(skb, k++);
if (!rxsa_nest) {
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2553,7 +2553,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
goto nla_put_failure;
}
- attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS);
+ attr = nla_nest_start_noflag(skb,
+ MACSEC_SA_ATTR_STATS);
if (!attr) {
nla_nest_cancel(skb, rxsa_list);
nla_nest_cancel(skb, rxsc_nest);
@@ -2636,61 +2637,61 @@ done:
static const struct genl_ops macsec_genl_ops[] = {
{
.cmd = MACSEC_CMD_GET_TXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.dumpit = macsec_dump_txsc,
- .policy = macsec_genl_policy,
},
{
.cmd = MACSEC_CMD_ADD_RXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_add_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_RXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_del_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_RXSC,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_upd_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_ADD_TXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_add_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_TXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_del_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_TXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_upd_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_ADD_RXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_add_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_RXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_del_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_RXSA,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = macsec_upd_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -2700,6 +2701,7 @@ static struct genl_family macsec_fam __ro_after_init = {
.hdrsize = 0,
.version = MACSEC_GENL_VERSION,
.maxattr = MACSEC_ATTR_MAX,
+ .policy = macsec_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = macsec_genl_ops,
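
The macsec hunks above track the tree-wide netlink strictness work in this merge: the per-op .policy pointer moves up into the genl_family so the core validates attributes in one place, the pre-existing commands opt out of the new strict checks with GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP to preserve their old behaviour, and the dump path switches to nla_nest_start_noflag() because nla_nest_start() now also sets NLA_F_NESTED on the nest attribute. Below is a minimal sketch of the same conventions for a made-up family; every FOO_*/foo_* identifier is invented for illustration and only the core APIs come from this series.

/* Sketch only, not part of this patch: a hypothetical "foo" genl family
 * using the conventions the macsec changes above adopt.
 */
#include <linux/module.h>
#include <net/genetlink.h>
#include <net/netlink.h>

enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_IFINDEX,
	FOO_ATTR_STATS,			/* nested */
	__FOO_ATTR_MAX,
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

enum { FOO_STATS_UNSPEC, FOO_STATS_PKTS, __FOO_STATS_MAX };

enum { FOO_CMD_UNSPEC, FOO_CMD_GET };

static const struct nla_policy foo_genl_policy[FOO_ATTR_MAX + 1] = {
	[FOO_ATTR_IFINDEX] = { .type = NLA_U32 },
};

static int foo_fill_stats(struct sk_buff *skb, u32 pkts)
{
	/* _noflag keeps the old wire format; plain nla_nest_start() would
	 * now also set NLA_F_NESTED on the nest attribute.
	 */
	struct nlattr *nest = nla_nest_start_noflag(skb, FOO_ATTR_STATS);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, FOO_STATS_PKTS, pkts)) {
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);
	return 0;
}

static int foo_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* a real handler would build a reply via foo_fill_stats() */
}

static const struct genl_ops foo_genl_ops[] = {
	{
		.cmd = FOO_CMD_GET,
		/* pre-existing command: skip the new strict validation */
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = foo_get_doit,
	},
};

static struct genl_family foo_genl_family __ro_after_init = {
	.name = "foo",
	.version = 1,
	.maxattr = FOO_ATTR_MAX,
	.policy = foo_genl_policy,	/* policy now lives on the family */
	.module = THIS_MODULE,
	.ops = foo_genl_ops,
	.n_ops = ARRAY_SIZE(foo_genl_ops),
};
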
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0c0f105657d3..b395423b19bc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -24,6 +24,7 @@
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
@@ -34,6 +35,7 @@
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
+#include <linux/phy.h>
#define MACVLAN_HASH_BITS 8
#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
@@ -822,6 +824,30 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *real_dev = macvlan_dev_real_dev(dev);
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct ifreq ifrr;
+ int err = -EOPNOTSUPP;
+
+ strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ case SIOCGHWTSTAMP:
+ if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+ err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ break;
+ }
+
+ if (!err)
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+
+ return err;
+}
+
/*
* macvlan network devices have devices nesting below it and are a special
* "super class" of normal network devices; split their locks off into a
@@ -1020,6 +1046,26 @@ static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
return __ethtool_get_link_ksettings(vlan->lowerdev, cmd);
}
+static int macvlan_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct net_device *real_dev = macvlan_dev_real_dev(dev);
+ const struct ethtool_ops *ops = real_dev->ethtool_ops;
+ struct phy_device *phydev = real_dev->phydev;
+
+ if (phydev && phydev->drv && phydev->drv->ts_info) {
+ return phydev->drv->ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
+ return ops->get_ts_info(real_dev, info);
+ } else {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ }
+
+ return 0;
+}
+
static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1094,6 +1140,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = macvlan_ethtool_get_link_ksettings,
.get_drvinfo = macvlan_ethtool_get_drvinfo,
+ .get_ts_info = macvlan_ethtool_get_ts_info,
};
static const struct net_device_ops macvlan_netdev_ops = {
@@ -1103,6 +1150,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_stop = macvlan_stop,
.ndo_start_xmit = macvlan_start_xmit,
.ndo_change_mtu = macvlan_change_mtu,
+ .ndo_do_ioctl = macvlan_do_ioctl,
.ndo_fix_features = macvlan_fix_features,
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
@@ -1576,7 +1624,7 @@ static int macvlan_fill_info(struct sk_buff *skb,
if (nla_put_u32(skb, IFLA_MACVLAN_MACADDR_COUNT, vlan->macaddr_count))
goto nla_put_failure;
if (vlan->macaddr_count > 0) {
- nest = nla_nest_start(skb, IFLA_MACVLAN_MACADDR_DATA);
+ nest = nla_nest_start_noflag(skb, IFLA_MACVLAN_MACADDR_DATA);
if (nest == NULL)
goto nla_put_failure;
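
With macvlan_do_ioctl() and macvlan_ethtool_get_ts_info() in place, hardware-timestamping requests made on a macvlan are forwarded to the real device (or its PHY); previously macvlan had no .ndo_do_ioctl, so these requests never reached the lower device's driver. A small user-space sketch of what now works through the macvlan interface; "macvlan0" is just an example name and error handling is minimal.

/* Sketch only, not part of this patch: query the hardware timestamping
 * configuration through a macvlan interface.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = { 0 };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	strncpy(ifr.ifr_name, "macvlan0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* forwarded by macvlan_do_ioctl() to the real device's driver */
	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);
	else
		perror("SIOCGHWTSTAMP");

	close(fd);
	return 0;
}
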
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index ed1166adaa2f..b16a1221d19b 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
static u16 net_failover_select_queue(struct net_device *dev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
struct net_device *primary_dev;
@@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
const struct net_device_ops *ops = primary_dev->netdev_ops;
if (ops->ndo_select_queue)
- txq = ops->ndo_select_queue(primary_dev, skb,
- sb_dev, fallback);
+ txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
else
- txq = fallback(primary_dev, skb, NULL);
+ txq = netdev_pick_tx(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
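
This net_failover hunk is part of the series that removes the select_queue_fallback_t argument from .ndo_select_queue: stacked drivers that used to invoke the fallback now call netdev_pick_tx() directly. A short sketch of the new callback shape for a hypothetical pass-through device; foo_* names, including the lower-device lookup helper, are invented.

/* Sketch only, not part of this patch. */
#include <linux/netdevice.h>

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct net_device *lower = foo_get_lower_dev(dev);	/* hypothetical */
	const struct net_device_ops *ops = lower->netdev_ops;

	if (ops->ndo_select_queue)
		return ops->ndo_select_queue(lower, skb, sb_dev);

	/* the per-call "fallback" pointer is gone; use the core helper */
	return netdev_pick_tx(lower, skb, NULL);
}
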
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 0fee1d06c084..09f1315d2f2a 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,17 +3,13 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o \
+ netdev.o dev.o fib.o bus.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
bpf.o
endif
-ifneq ($(CONFIG_NET_DEVLINK),)
-netdevsim-objs += devlink.o fib.o
-endif
-
ifneq ($(CONFIG_XFRM_OFFLOAD),)
netdevsim-objs += ipsec.o
endif
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index f92c43453ec6..2b74425822ab 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -27,7 +27,7 @@
bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
struct nsim_bpf_bound_prog {
- struct netdevsim *ns;
+ struct nsim_dev *nsim_dev;
struct bpf_prog *prog;
struct dentry *ddir;
const char *state;
@@ -65,8 +65,8 @@ nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
struct nsim_bpf_bound_prog *state;
state = env->prog->aux->offload->dev_priv;
- if (state->ns->bpf_bind_verifier_delay && !insn_idx)
- msleep(state->ns->bpf_bind_verifier_delay);
+ if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
+ msleep(state->nsim_dev->bpf_bind_verifier_delay);
if (insn_idx == env->prog->len - 1)
pr_vlog(env, "Hello from netdevsim!\n");
@@ -213,7 +213,8 @@ nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
return 0;
}
-static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
+static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
+ struct bpf_prog *prog)
{
struct nsim_bpf_bound_prog *state;
char name[16];
@@ -222,13 +223,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
if (!state)
return -ENOMEM;
- state->ns = ns;
+ state->nsim_dev = nsim_dev;
state->prog = prog;
state->state = "verify";
/* Program id is not populated yet when we create the state. */
- sprintf(name, "%u", ns->sdev->prog_id_gen++);
- state->ddir = debugfs_create_dir(name, ns->sdev->ddir_bpf_bound_progs);
+ sprintf(name, "%u", nsim_dev->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
if (IS_ERR_OR_NULL(state->ddir)) {
kfree(state);
return -ENOMEM;
@@ -239,7 +240,7 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
&state->state, &nsim_bpf_string_fops);
debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
- list_add_tail(&state->l, &ns->sdev->bpf_bound_progs);
+ list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
prog->aux->offload->dev_priv = state;
@@ -248,12 +249,13 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog)
static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
{
- struct netdevsim *ns = bpf_offload_dev_priv(prog->aux->offload->offdev);
+ struct nsim_dev *nsim_dev =
+ bpf_offload_dev_priv(prog->aux->offload->offdev);
- if (!ns->bpf_bind_accept)
+ if (!nsim_dev->bpf_bind_accept)
return -EOPNOTSUPP;
- return nsim_bpf_create_prog(ns, prog);
+ return nsim_bpf_create_prog(nsim_dev, prog);
}
static int nsim_bpf_translate(struct bpf_prog *prog)
@@ -512,7 +514,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
}
offmap->dev_ops = &nsim_bpf_map_ops;
- list_add_tail(&nmap->l, &ns->sdev->bpf_bound_maps);
+ list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);
return 0;
@@ -576,61 +578,68 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
}
}
-int nsim_bpf_init(struct netdevsim *ns)
+int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
int err;
- if (ns->sdev->refcnt == 1) {
- INIT_LIST_HEAD(&ns->sdev->bpf_bound_progs);
- INIT_LIST_HEAD(&ns->sdev->bpf_bound_maps);
+ INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
+ INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);
- ns->sdev->ddir_bpf_bound_progs =
- debugfs_create_dir("bpf_bound_progs", ns->sdev->ddir);
- if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs))
- return -ENOMEM;
+ nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
+ nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(nsim_dev->ddir_bpf_bound_progs))
+ return -ENOMEM;
- ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops,
- ns);
- err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev);
- if (err)
- return err;
- }
+ nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
+ err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
+ if (err)
+ return err;
+
+ nsim_dev->bpf_bind_accept = true;
+ debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_accept);
+ debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_verifier_delay);
+ return 0;
+}
+
+void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
+{
+ WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
+ WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
+ bpf_offload_dev_destroy(nsim_dev->bpf_dev);
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+ struct dentry *ddir = ns->nsim_dev_port->ddir;
+ int err;
- err = bpf_offload_dev_netdev_register(ns->sdev->bpf_dev, ns->netdev);
+ err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
+ ns->netdev);
if (err)
- goto err_destroy_bdev;
+ return err;
- debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
+ debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
&ns->bpf_offloaded_id);
- ns->bpf_bind_accept = true;
- debugfs_create_bool("bpf_bind_accept", 0600, ns->ddir,
- &ns->bpf_bind_accept);
- debugfs_create_u32("bpf_bind_verifier_delay", 0600, ns->ddir,
- &ns->bpf_bind_verifier_delay);
-
ns->bpf_tc_accept = true;
- debugfs_create_bool("bpf_tc_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_tc_accept", 0600, ddir,
&ns->bpf_tc_accept);
- debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
&ns->bpf_tc_non_bound_accept);
ns->bpf_xdpdrv_accept = true;
- debugfs_create_bool("bpf_xdpdrv_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
&ns->bpf_xdpdrv_accept);
ns->bpf_xdpoffload_accept = true;
- debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
&ns->bpf_xdpoffload_accept);
ns->bpf_map_accept = true;
- debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+ debugfs_create_bool("bpf_map_accept", 0600, ddir,
&ns->bpf_map_accept);
return 0;
-
-err_destroy_bdev:
- if (ns->sdev->refcnt == 1)
- bpf_offload_dev_destroy(ns->sdev->bpf_dev);
- return err;
}
void nsim_bpf_uninit(struct netdevsim *ns)
@@ -638,11 +647,5 @@ void nsim_bpf_uninit(struct netdevsim *ns)
WARN_ON(ns->xdp.prog);
WARN_ON(ns->xdp_hw.prog);
WARN_ON(ns->bpf_offloaded);
- bpf_offload_dev_netdev_unregister(ns->sdev->bpf_dev, ns->netdev);
-
- if (ns->sdev->refcnt == 1) {
- WARN_ON(!list_empty(&ns->sdev->bpf_bound_progs));
- WARN_ON(!list_empty(&ns->sdev->bpf_bound_maps));
- bpf_offload_dev_destroy(ns->sdev->bpf_dev);
- }
+ bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
}
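
In these bpf.c hunks the bound-program bookkeeping moves from the old shared ns->sdev onto the new per-device struct nsim_dev, and the offload device is created with that struct as its private pointer, so the verifier callbacks fetch it with bpf_offload_dev_priv() instead of reaching back through a netdev. A reduced sketch of the pattern; foo_* names are invented.

/* Sketch only, not part of this patch: device-level state attached to a
 * BPF offload device and recovered in the ->prepare() callback.
 */
#include <linux/bpf.h>
#include <linux/err.h>

struct foo_dev {
	struct bpf_offload_dev *bpf_dev;
	bool bind_accept;
};

static int foo_bpf_verifier_prep(struct bpf_prog *prog)
{
	struct foo_dev *fdev = bpf_offload_dev_priv(prog->aux->offload->offdev);

	if (!fdev->bind_accept)
		return -EOPNOTSUPP;
	return 0;
}

static const struct bpf_prog_offload_ops foo_bpf_dev_ops = {
	.prepare = foo_bpf_verifier_prep,
	/* .insn_hook, .translate, .destroy, ... as needed */
};

static int foo_bpf_dev_init(struct foo_dev *fdev)
{
	fdev->bpf_dev = bpf_offload_dev_create(&foo_bpf_dev_ops, fdev);
	return PTR_ERR_OR_ZERO(fdev->bpf_dev);
}
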
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
new file mode 100644
index 000000000000..1a0ff3d7747b
--- /dev/null
+++ b/drivers/net/netdevsim/bus.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2019 Mellanox Technologies. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "netdevsim.h"
+
+static DEFINE_IDA(nsim_bus_dev_ids);
+static LIST_HEAD(nsim_bus_dev_list);
+static DEFINE_MUTEX(nsim_bus_dev_list_lock);
+
+static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
+{
+ return container_of(dev, struct nsim_bus_dev, dev);
+}
+
+static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int num_vfs)
+{
+ nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
+ sizeof(struct nsim_vf_config),
+ GFP_KERNEL);
+ if (!nsim_bus_dev->vfconfigs)
+ return -ENOMEM;
+ nsim_bus_dev->num_vfs = num_vfs;
+
+ return 0;
+}
+
+static void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
+{
+ kfree(nsim_bus_dev->vfconfigs);
+ nsim_bus_dev->vfconfigs = NULL;
+ nsim_bus_dev->num_vfs = 0;
+}
+
+static ssize_t
+nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int num_vfs;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num_vfs);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ if (nsim_bus_dev->num_vfs == num_vfs)
+ goto exit_good;
+ if (nsim_bus_dev->num_vfs && num_vfs) {
+ ret = -EBUSY;
+ goto exit_unlock;
+ }
+
+ if (num_vfs) {
+ ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs);
+ if (ret)
+ goto exit_unlock;
+ } else {
+ nsim_bus_dev_vfs_disable(nsim_bus_dev);
+ }
+exit_good:
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static ssize_t
+nsim_bus_dev_numvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs);
+}
+
+static struct device_attribute nsim_bus_dev_numvfs_attr =
+ __ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
+ nsim_bus_dev_numvfs_store);
+
+static ssize_t
+new_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int port_index;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
+ ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+ return ret ? ret : count;
+}
+
+static struct device_attribute nsim_bus_dev_new_port_attr = __ATTR_WO(new_port);
+
+static ssize_t
+del_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int port_index;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
+ ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+ return ret ? ret : count;
+}
+
+static struct device_attribute nsim_bus_dev_del_port_attr = __ATTR_WO(del_port);
+
+static struct attribute *nsim_bus_dev_attrs[] = {
+ &nsim_bus_dev_numvfs_attr.attr,
+ &nsim_bus_dev_new_port_attr.attr,
+ &nsim_bus_dev_del_port_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group nsim_bus_dev_attr_group = {
+ .attrs = nsim_bus_dev_attrs,
+};
+
+static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
+ &nsim_bus_dev_attr_group,
+ NULL,
+};
+
+static void nsim_bus_dev_release(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ nsim_bus_dev_vfs_disable(nsim_bus_dev);
+}
+
+static struct device_type nsim_bus_dev_type = {
+ .groups = nsim_bus_dev_attr_groups,
+ .release = nsim_bus_dev_release,
+};
+
+static struct nsim_bus_dev *
+nsim_bus_dev_new(unsigned int id, unsigned int port_count);
+
+static ssize_t
+new_device_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev;
+ unsigned int port_count;
+ unsigned int id;
+ int err;
+
+ err = sscanf(buf, "%u %u", &id, &port_count);
+ switch (err) {
+ case 1:
+ port_count = 1;
+ /* fall through */
+ case 2:
+ if (id > INT_MAX) {
+ pr_err("Value of \"id\" is too big.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
+ return -EINVAL;
+ }
+ nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+ if (IS_ERR(nsim_bus_dev))
+ return PTR_ERR(nsim_bus_dev);
+
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
+ mutex_unlock(&nsim_bus_dev_list_lock);
+
+ return count;
+}
+static BUS_ATTR_WO(new_device);
+
+static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev);
+
+static ssize_t
+del_device_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+ unsigned int id;
+ int err;
+
+ err = sscanf(buf, "%u", &id);
+ switch (err) {
+ case 1:
+ if (id > INT_MAX) {
+ pr_err("Value of \"id\" is too big.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Format for deleting device is \"id\" (uint).\n");
+ return -EINVAL;
+ }
+
+ err = -ENOENT;
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ if (nsim_bus_dev->dev.id != id)
+ continue;
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ err = 0;
+ break;
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(del_device);
+
+static struct attribute *nsim_bus_attrs[] = {
+ &bus_attr_new_device.attr,
+ &bus_attr_del_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(nsim_bus);
+
+static int nsim_bus_probe(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return nsim_dev_probe(nsim_bus_dev);
+}
+
+static int nsim_bus_remove(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ nsim_dev_remove(nsim_bus_dev);
+ return 0;
+}
+
+static int nsim_num_vf(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return nsim_bus_dev->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+ .name = DRV_NAME,
+ .dev_name = DRV_NAME,
+ .bus_groups = nsim_bus_groups,
+ .probe = nsim_bus_probe,
+ .remove = nsim_bus_remove,
+ .num_vf = nsim_num_vf,
+};
+
+static struct nsim_bus_dev *
+nsim_bus_dev_new(unsigned int id, unsigned int port_count)
+{
+ struct nsim_bus_dev *nsim_bus_dev;
+ int err;
+
+ nsim_bus_dev = kzalloc(sizeof(*nsim_bus_dev), GFP_KERNEL);
+ if (!nsim_bus_dev)
+ return ERR_PTR(-ENOMEM);
+
+ err = ida_alloc_range(&nsim_bus_dev_ids, id, id, GFP_KERNEL);
+ if (err < 0)
+ goto err_nsim_bus_dev_free;
+ nsim_bus_dev->dev.id = err;
+ nsim_bus_dev->dev.bus = &nsim_bus;
+ nsim_bus_dev->dev.type = &nsim_bus_dev_type;
+ nsim_bus_dev->port_count = port_count;
+
+ err = device_register(&nsim_bus_dev->dev);
+ if (err)
+ goto err_nsim_bus_dev_id_free;
+ return nsim_bus_dev;
+
+err_nsim_bus_dev_id_free:
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+err_nsim_bus_dev_free:
+ kfree(nsim_bus_dev);
+ return ERR_PTR(err);
+}
+
+static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
+{
+ device_unregister(&nsim_bus_dev->dev);
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ kfree(nsim_bus_dev);
+}
+
+static struct device_driver nsim_driver = {
+ .name = DRV_NAME,
+ .bus = &nsim_bus,
+ .owner = THIS_MODULE,
+};
+
+int nsim_bus_init(void)
+{
+ int err;
+
+ err = bus_register(&nsim_bus);
+ if (err)
+ return err;
+ err = driver_register(&nsim_driver);
+ if (err)
+ goto err_bus_unregister;
+ return 0;
+
+err_bus_unregister:
+ bus_unregister(&nsim_bus);
+ return err;
+}
+
+void nsim_bus_exit(void)
+{
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ driver_unregister(&nsim_driver);
+ bus_unregister(&nsim_bus);
+}
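
bus.c replaces the old per-netdev fake device with a proper netdevsim bus: devices are created and destroyed by writing to the bus-level new_device/del_device attributes, and ports are added and removed through the per-device new_port/del_port files. A small user-space sketch of driving it; the paths assume DRV_NAME expands to "netdevsim", which is how the bus appears under /sys/bus, and the device id and port count are arbitrary.

/* Sketch only, not part of this patch: create netdevsim device 10 with two
 * ports, then delete it.  Equivalent to
 *   echo "10 2" > /sys/bus/netdevsim/new_device
 *   echo "10"   > /sys/bus/netdevsim/del_device
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	if (sysfs_write("/sys/bus/netdevsim/new_device", "10 2"))
		perror("new_device");
	/* ... exercise the simulated ports here ... */
	if (sysfs_write("/sys/bus/netdevsim/del_device", "10"))
		perror("del_device");
	return 0;
}
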
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
new file mode 100644
index 000000000000..b509b941d5ca
--- /dev/null
+++ b/drivers/net/netdevsim/dev.c
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2018 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com>
+ * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <net/devlink.h>
+
+#include "netdevsim.h"
+
+static struct dentry *nsim_dev_ddir;
+
+static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
+{
+ char dev_ddir_name[16];
+
+ sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
+ nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
+ if (IS_ERR_OR_NULL(nsim_dev->ddir))
+ return PTR_ERR_OR_ZERO(nsim_dev->ddir) ?: -EINVAL;
+ nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
+ if (IS_ERR_OR_NULL(nsim_dev->ports_ddir))
+ return PTR_ERR_OR_ZERO(nsim_dev->ports_ddir) ?: -EINVAL;
+ return 0;
+}
+
+static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev)
+{
+ debugfs_remove_recursive(nsim_dev->ports_ddir);
+ debugfs_remove_recursive(nsim_dev->ddir);
+}
+
+static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
+ struct nsim_dev_port *nsim_dev_port)
+{
+ char port_ddir_name[16];
+ char dev_link_name[32];
+
+ sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
+ nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
+ nsim_dev->ports_ddir);
+ if (IS_ERR_OR_NULL(nsim_dev_port->ddir))
+ return -ENOMEM;
+
+ sprintf(dev_link_name, "../../../" DRV_NAME "%u",
+ nsim_dev->nsim_bus_dev->dev.id);
+ debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
+
+ return 0;
+}
+
+static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
+{
+ debugfs_remove_recursive(nsim_dev_port->ddir);
+}
+
+static u64 nsim_dev_ipv4_fib_resource_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB, false);
+}
+
+static u64 nsim_dev_ipv4_fib_rules_res_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
+
+static u64 nsim_dev_ipv6_fib_resource_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB, false);
+}
+
+static u64 nsim_dev_ipv6_fib_rules_res_occ_get(void *priv)
+{
+ struct nsim_dev *nsim_dev = priv;
+
+ return nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
+
+static int nsim_dev_resources_register(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct devlink_resource_size_params params = {
+ .size_max = (u64)-1,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY
+ };
+ int err;
+ u64 n;
+
+ /* Resources for IPv4 */
+ err = devlink_resource_register(devlink, "IPv4", (u64)-1,
+ NSIM_RESOURCE_IPV4,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register IPv4 top resource\n");
+ goto out;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4, &params);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB resource\n");
+ return err;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV4_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV4, &params);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB rules resource\n");
+ return err;
+ }
+
+ /* Resources for IPv6 */
+ err = devlink_resource_register(devlink, "IPv6", (u64)-1,
+ NSIM_RESOURCE_IPV6,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register IPv6 top resource\n");
+ goto out;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB, true);
+ err = devlink_resource_register(devlink, "fib", n,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6, &params);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB resource\n");
+ return err;
+ }
+
+ n = nsim_fib_get_val(nsim_dev->fib_data,
+ NSIM_RESOURCE_IPV6_FIB_RULES, true);
+ err = devlink_resource_register(devlink, "fib-rules", n,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_IPV6, &params);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB rules resource\n");
+ return err;
+ }
+
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_dev_ipv4_fib_resource_occ_get,
+ nsim_dev);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_dev_ipv4_fib_rules_res_occ_get,
+ nsim_dev);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_dev_ipv6_fib_resource_occ_get,
+ nsim_dev);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_dev_ipv6_fib_rules_res_occ_get,
+ nsim_dev);
+out:
+ return err;
+}
+
+static int nsim_dev_reload(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
+ int err;
+ u64 val;
+
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (!err) {
+ err = nsim_fib_set_max(nsim_dev->fib_data,
+ res_ids[i], val, extack);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops nsim_dev_devlink_ops = {
+ .reload = nsim_dev_reload,
+};
+
+static struct nsim_dev *
+nsim_dev_create(struct nsim_bus_dev *nsim_bus_dev, unsigned int port_count)
+{
+ struct nsim_dev *nsim_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+ nsim_dev = devlink_priv(devlink);
+ nsim_dev->nsim_bus_dev = nsim_bus_dev;
+ nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
+ get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+
+ nsim_dev->fib_data = nsim_fib_create();
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_devlink_free;
+ }
+
+ err = nsim_dev_resources_register(devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+ goto err_resources_unregister;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_dl_unregister;
+
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+ goto err_debugfs_exit;
+
+ return nsim_dev;
+
+err_debugfs_exit:
+ nsim_dev_debugfs_exit(nsim_dev);
+err_dl_unregister:
+ devlink_unregister(devlink);
+err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+err_fib_destroy:
+ nsim_fib_destroy(nsim_dev->fib_data);
+err_devlink_free:
+ devlink_free(devlink);
+ return ERR_PTR(err);
+}
+
+static void nsim_dev_destroy(struct nsim_dev *nsim_dev)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+
+ nsim_bpf_dev_exit(nsim_dev);
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+ nsim_fib_destroy(nsim_dev->fib_data);
+ mutex_destroy(&nsim_dev->port_list_lock);
+ devlink_free(devlink);
+}
+
+static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev_port *nsim_dev_port;
+ struct devlink_port *devlink_port;
+ int err;
+
+ nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
+ if (!nsim_dev_port)
+ return -ENOMEM;
+ nsim_dev_port->port_index = port_index;
+
+ devlink_port = &nsim_dev_port->devlink_port;
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_index + 1, 0, 0,
+ nsim_dev->switch_id.id,
+ nsim_dev->switch_id.id_len);
+ err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ port_index);
+ if (err)
+ goto err_port_free;
+
+ err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port);
+ if (err)
+ goto err_dl_port_unregister;
+
+ nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
+ if (IS_ERR(nsim_dev_port->ns)) {
+ err = PTR_ERR(nsim_dev_port->ns);
+ goto err_port_debugfs_exit;
+ }
+
+ devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
+ list_add(&nsim_dev_port->list, &nsim_dev->port_list);
+
+ return 0;
+
+err_port_debugfs_exit:
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+err_dl_port_unregister:
+ devlink_port_unregister(devlink_port);
+err_port_free:
+ kfree(nsim_dev_port);
+ return err;
+}
+
+static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
+{
+ struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
+
+ list_del(&nsim_dev_port->list);
+ devlink_port_type_clear(devlink_port);
+ nsim_destroy(nsim_dev_port->ns);
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+ devlink_port_unregister(devlink_port);
+ kfree(nsim_dev_port);
+}
+
+static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
+ list_for_each_entry_safe(nsim_dev_port, tmp,
+ &nsim_dev->port_list, list)
+ __nsim_dev_port_del(nsim_dev_port);
+}
+
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev;
+ int i;
+ int err;
+
+ nsim_dev = nsim_dev_create(nsim_bus_dev, nsim_bus_dev->port_count);
+ if (IS_ERR(nsim_dev))
+ return PTR_ERR(nsim_dev);
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
+ for (i = 0; i < nsim_bus_dev->port_count; i++) {
+ err = __nsim_dev_port_add(nsim_dev, i);
+ if (err)
+ goto err_port_del_all;
+ }
+ return 0;
+
+err_port_del_all:
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_destroy(nsim_dev);
+ return err;
+}
+
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_destroy(nsim_dev);
+}
+
+static struct nsim_dev_port *
+__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index)
+{
+ struct nsim_dev_port *nsim_dev_port;
+
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list)
+ if (nsim_dev_port->port_index == port_index)
+ return nsim_dev_port;
+ return NULL;
+}
+
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ int err;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ if (__nsim_dev_port_lookup(nsim_dev, port_index))
+ err = -EEXIST;
+ else
+ err = __nsim_dev_port_add(nsim_dev, port_index);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ return err;
+}
+
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct nsim_dev_port *nsim_dev_port;
+ int err = 0;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, port_index);
+ if (!nsim_dev_port)
+ err = -ENOENT;
+ else
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ return err;
+}
+
+int nsim_dev_init(void)
+{
+ nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
+ if (IS_ERR_OR_NULL(nsim_dev_ddir))
+ return -ENOMEM;
+ return 0;
+}
+
+void nsim_dev_exit(void)
+{
+ debugfs_remove_recursive(nsim_dev_ddir);
+}
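
dev.c keeps the driver's per-device state inside the devlink instance itself: devlink_alloc() is asked for sizeof(struct nsim_dev) of private space, devlink_priv() returns that area, and priv_to_devlink() goes the other way when only the private struct is at hand. A reduced sketch of that allocation pattern; foo_* names are invented.

/* Sketch only, not part of this patch: driver state carried as devlink
 * private data.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <net/devlink.h>

struct foo_dev {
	struct device *dev;
	/* ... per-device state ... */
};

static const struct devlink_ops foo_devlink_ops = {
	/* .reload etc. as needed */
};

static struct foo_dev *foo_create(struct device *dev)
{
	struct devlink *devlink;
	struct foo_dev *fdev;
	int err;

	devlink = devlink_alloc(&foo_devlink_ops, sizeof(*fdev));
	if (!devlink)
		return ERR_PTR(-ENOMEM);
	fdev = devlink_priv(devlink);	/* private area inside the devlink */
	fdev->dev = dev;

	err = devlink_register(devlink, dev);
	if (err) {
		devlink_free(devlink);
		return ERR_PTR(err);
	}
	return fdev;
}

static void foo_destroy(struct foo_dev *fdev)
{
	struct devlink *devlink = priv_to_devlink(fdev);	/* back-pointer */

	devlink_unregister(devlink);
	devlink_free(devlink);
}
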
diff --git a/drivers/net/netdevsim/devlink.c b/drivers/net/netdevsim/devlink.c
deleted file mode 100644
index 5135fc371f01..000000000000
--- a/drivers/net/netdevsim/devlink.c
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
- * Copyright (c) 2018 Cumulus Networks. All rights reserved.
- * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com>
- *
- * This software is licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree.
- *
- * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
- * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
- * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
- * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
- * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
- */
-
-#include <linux/device.h>
-#include <net/devlink.h>
-#include <net/netns/generic.h>
-
-#include "netdevsim.h"
-
-static unsigned int nsim_devlink_id;
-
-/* place holder until devlink and namespaces is sorted out */
-static struct net *nsim_devlink_net(struct devlink *devlink)
-{
- return &init_net;
-}
-
-/* IPv4
- */
-static u64 nsim_ipv4_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
-}
-
-static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
-}
-
-/* IPv6
- */
-static u64 nsim_ipv6_fib_resource_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
-}
-
-static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv)
-{
- struct net *net = priv;
-
- return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
-}
-
-static int devlink_resources_register(struct devlink *devlink)
-{
- struct devlink_resource_size_params params = {
- .size_max = (u64)-1,
- .size_granularity = 1,
- .unit = DEVLINK_RESOURCE_UNIT_ENTRY
- };
- struct net *net = nsim_devlink_net(devlink);
- int err;
- u64 n;
-
- /* Resources for IPv4 */
- err = devlink_resource_register(devlink, "IPv4", (u64)-1,
- NSIM_RESOURCE_IPV4,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
- if (err) {
- pr_err("Failed to register IPv4 top resource\n");
- goto out;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
- NSIM_RESOURCE_IPV4_FIB,
- NSIM_RESOURCE_IPV4, &params);
- if (err) {
- pr_err("Failed to register IPv4 FIB resource\n");
- return err;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV4, &params);
- if (err) {
- pr_err("Failed to register IPv4 FIB rules resource\n");
- return err;
- }
-
- /* Resources for IPv6 */
- err = devlink_resource_register(devlink, "IPv6", (u64)-1,
- NSIM_RESOURCE_IPV6,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &params);
- if (err) {
- pr_err("Failed to register IPv6 top resource\n");
- goto out;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
- err = devlink_resource_register(devlink, "fib", n,
- NSIM_RESOURCE_IPV6_FIB,
- NSIM_RESOURCE_IPV6, &params);
- if (err) {
- pr_err("Failed to register IPv6 FIB resource\n");
- return err;
- }
-
- n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
- err = devlink_resource_register(devlink, "fib-rules", n,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- NSIM_RESOURCE_IPV6, &params);
- if (err) {
- pr_err("Failed to register IPv6 FIB rules resource\n");
- return err;
- }
-
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB,
- nsim_ipv4_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV4_FIB_RULES,
- nsim_ipv4_fib_rules_res_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB,
- nsim_ipv6_fib_resource_occ_get,
- net);
- devlink_resource_occ_get_register(devlink,
- NSIM_RESOURCE_IPV6_FIB_RULES,
- nsim_ipv6_fib_rules_res_occ_get,
- net);
-out:
- return err;
-}
-
-static int nsim_devlink_reload(struct devlink *devlink,
- struct netlink_ext_ack *extack)
-{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- struct net *net = nsim_devlink_net(devlink);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- int err;
- u64 val;
-
- err = devlink_resource_size_get(devlink, res_ids[i], &val);
- if (!err) {
- err = nsim_fib_set_max(net, res_ids[i], val, extack);
- if (err)
- return err;
- }
- }
-
- return 0;
-}
-
-static void nsim_devlink_net_reset(struct net *net)
-{
- enum nsim_resource_id res_ids[] = {
- NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
- NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(res_ids); ++i) {
- if (nsim_fib_set_max(net, res_ids[i], (u64)-1, NULL)) {
- pr_err("Failed to reset limit for resource %u\n",
- res_ids[i]);
- }
- }
-}
-
-static const struct devlink_ops nsim_devlink_ops = {
- .reload = nsim_devlink_reload,
-};
-
-/* once devlink / namespace issues are sorted out
- * this needs to be net in which a devlink instance
- * is to be created. e.g., dev_net(ns->netdev)
- */
-static struct net *nsim_to_net(struct netdevsim *ns)
-{
- return &init_net;
-}
-
-void nsim_devlink_teardown(struct netdevsim *ns)
-{
- if (ns->devlink) {
- struct net *net = nsim_to_net(ns);
- bool *reg_devlink = net_generic(net, nsim_devlink_id);
-
- devlink_resources_unregister(ns->devlink, NULL);
- devlink_unregister(ns->devlink);
- devlink_free(ns->devlink);
- ns->devlink = NULL;
-
- nsim_devlink_net_reset(net);
- *reg_devlink = true;
- }
-}
-
-int nsim_devlink_setup(struct netdevsim *ns)
-{
- struct net *net = nsim_to_net(ns);
- bool *reg_devlink = net_generic(net, nsim_devlink_id);
- struct devlink *devlink;
- int err;
-
- /* only one device per namespace controls devlink */
- if (!*reg_devlink) {
- ns->devlink = NULL;
- return 0;
- }
-
- devlink = devlink_alloc(&nsim_devlink_ops, 0);
- if (!devlink)
- return -ENOMEM;
-
- err = devlink_register(devlink, &ns->dev);
- if (err)
- goto err_devlink_free;
-
- err = devlink_resources_register(devlink);
- if (err)
- goto err_dl_unregister;
-
- ns->devlink = devlink;
-
- *reg_devlink = false;
-
- return 0;
-
-err_dl_unregister:
- devlink_unregister(devlink);
-err_devlink_free:
- devlink_free(devlink);
-
- return err;
-}
-
-/* Initialize per network namespace state */
-static int __net_init nsim_devlink_netns_init(struct net *net)
-{
- bool *reg_devlink = net_generic(net, nsim_devlink_id);
-
- *reg_devlink = true;
-
- return 0;
-}
-
-static struct pernet_operations nsim_devlink_net_ops = {
- .init = nsim_devlink_netns_init,
- .id = &nsim_devlink_id,
- .size = sizeof(bool),
-};
-
-void nsim_devlink_exit(void)
-{
- unregister_pernet_subsys(&nsim_devlink_net_ops);
- nsim_fib_exit();
-}
-
-int nsim_devlink_init(void)
-{
- int err;
-
- err = nsim_fib_init();
- if (err)
- goto err_out;
-
- err = register_pernet_subsys(&nsim_devlink_net_ops);
- if (err)
- nsim_fib_exit();
-
-err_out:
- return err;
-}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index f61d094746c0..8c57ba747772 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -18,7 +18,6 @@
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
-#include <net/netns/generic.h>
#include "netdevsim.h"
@@ -33,15 +32,14 @@ struct nsim_per_fib_data {
};
struct nsim_fib_data {
+ struct notifier_block fib_nb;
struct nsim_per_fib_data ipv4;
struct nsim_per_fib_data ipv6;
};
-static unsigned int nsim_fib_net_id;
-
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
switch (res_id) {
@@ -64,10 +62,10 @@ u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max)
return max ? entry->max : entry->num;
}
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
+int nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack)
{
- struct nsim_fib_data *fib_data = net_generic(net, nsim_fib_net_id);
struct nsim_fib_entry *entry;
int err = 0;
@@ -120,9 +118,9 @@ static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_rule_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_rule_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -157,9 +155,9 @@ static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
return err;
}
-static int nsim_fib_event(struct fib_notifier_info *info, bool add)
+static int nsim_fib_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
{
- struct nsim_fib_data *data = net_generic(info->net, nsim_fib_net_id);
struct netlink_ext_ack *extack = info->extack;
int err = 0;
@@ -178,18 +176,22 @@ static int nsim_fib_event(struct fib_notifier_info *info, bool add)
static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
void *ptr)
{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
struct fib_notifier_info *info = ptr;
int err = 0;
switch (event) {
case FIB_EVENT_RULE_ADD: /* fall through */
case FIB_EVENT_RULE_DEL:
- err = nsim_fib_rule_event(info, event == FIB_EVENT_RULE_ADD);
+ err = nsim_fib_rule_event(data, info,
+ event == FIB_EVENT_RULE_ADD);
break;
case FIB_EVENT_ENTRY_ADD: /* fall through */
case FIB_EVENT_ENTRY_DEL:
- err = nsim_fib_event(info, event == FIB_EVENT_ENTRY_ADD);
+ err = nsim_fib_event(data, info,
+ event == FIB_EVENT_ENTRY_ADD);
break;
}
@@ -199,30 +201,23 @@ static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
/* inconsistent dump, trying again */
static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
{
- struct nsim_fib_data *data;
- struct net *net;
-
- rcu_read_lock();
- for_each_net_rcu(net) {
- data = net_generic(net, nsim_fib_net_id);
-
- data->ipv4.fib.num = 0ULL;
- data->ipv4.rules.num = 0ULL;
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
- data->ipv6.fib.num = 0ULL;
- data->ipv6.rules.num = 0ULL;
- }
- rcu_read_unlock();
+ data->ipv4.fib.num = 0ULL;
+ data->ipv4.rules.num = 0ULL;
+ data->ipv6.fib.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
}
-static struct notifier_block nsim_fib_nb = {
- .notifier_call = nsim_fib_event_nb,
-};
-
-/* Initialize per network namespace state */
-static int __net_init nsim_fib_netns_init(struct net *net)
+struct nsim_fib_data *nsim_fib_create(void)
{
- struct nsim_fib_data *data = net_generic(net, nsim_fib_net_id);
+ struct nsim_fib_data *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
data->ipv4.fib.max = (u64)-1;
data->ipv4.rules.max = (u64)-1;
@@ -230,37 +225,22 @@ static int __net_init nsim_fib_netns_init(struct net *net)
data->ipv6.fib.max = (u64)-1;
data->ipv6.rules.max = (u64)-1;
- return 0;
-}
-
-static struct pernet_operations nsim_fib_net_ops = {
- .init = nsim_fib_netns_init,
- .id = &nsim_fib_net_id,
- .size = sizeof(struct nsim_fib_data),
-};
-
-void nsim_fib_exit(void)
-{
- unregister_pernet_subsys(&nsim_fib_net_ops);
- unregister_fib_notifier(&nsim_fib_nb);
-}
-
-int nsim_fib_init(void)
-{
- int err;
-
- err = register_pernet_subsys(&nsim_fib_net_ops);
- if (err < 0) {
- pr_err("Failed to register pernet subsystem\n");
- goto err_out;
- }
-
- err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
- if (err < 0) {
+ data->fib_nb.notifier_call = nsim_fib_event_nb;
+ err = register_fib_notifier(&data->fib_nb, nsim_fib_dump_inconsistent);
+ if (err) {
pr_err("Failed to register fib notifier\n");
goto err_out;
}
+ return data;
+
err_out:
- return err;
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+void nsim_fib_destroy(struct nsim_fib_data *data)
+{
+ unregister_fib_notifier(&data->fib_nb);
+ kfree(data);
}
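
The fib.c rewrite drops the pernet bookkeeping entirely: instead of looking the accounting data up via net_generic(), the notifier_block is embedded in nsim_fib_data and the callback recovers its context with container_of(). A minimal sketch of that idiom; foo_* names are invented and no dump-inconsistent callback is wired up in this sketch.

/* Sketch only, not part of this patch: per-instance context recovered from
 * an embedded notifier_block.
 */
#include <linux/err.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <net/fib_notifier.h>

struct foo_fib_data {
	struct notifier_block fib_nb;
	u64 num_entries;
};

static int foo_fib_event_nb(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct foo_fib_data *data =
		container_of(nb, struct foo_fib_data, fib_nb);

	/* 'data' is the instance that registered this notifier; no global
	 * or per-netns lookup is needed.
	 */
	data->num_entries++;
	return NOTIFY_OK;
}

static struct foo_fib_data *foo_fib_create(void)
{
	struct foo_fib_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
	int err;

	if (!data)
		return ERR_PTR(-ENOMEM);
	data->fib_nb.notifier_call = foo_fib_event_nb;
	err = register_fib_notifier(&data->fib_nb, NULL);
	if (err) {
		kfree(data);
		return ERR_PTR(err);
	}
	return data;
}
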
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 76e11d889bb6..e27fc1a4516d 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -283,7 +283,8 @@ void nsim_ipsec_init(struct netdevsim *ns)
ns->netdev->features |= NSIM_ESP_FEATURES;
ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
- ns->ipsec.pfile = debugfs_create_file("ipsec", 0400, ns->ddir, ns,
+ ns->ipsec.pfile = debugfs_create_file("ipsec", 0400,
+ ns->nsim_dev_port->ddir, ns,
&ipsec_dbg_fops);
}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 75a50b59cb8f..e5c8aa08e1cd 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -25,230 +25,6 @@
#include "netdevsim.h"
-struct nsim_vf_config {
- int link_state;
- u16 min_tx_rate;
- u16 max_tx_rate;
- u16 vlan;
- __be16 vlan_proto;
- u16 qos;
- u8 vf_mac[ETH_ALEN];
- bool spoofchk_enabled;
- bool trusted;
- bool rss_query_enabled;
-};
-
-static u32 nsim_dev_id;
-
-static struct dentry *nsim_ddir;
-static struct dentry *nsim_sdev_ddir;
-
-static int nsim_num_vf(struct device *dev)
-{
- struct netdevsim *ns = to_nsim(dev);
-
- return ns->num_vfs;
-}
-
-static struct bus_type nsim_bus = {
- .name = DRV_NAME,
- .dev_name = DRV_NAME,
- .num_vf = nsim_num_vf,
-};
-
-static int nsim_vfs_enable(struct netdevsim *ns, unsigned int num_vfs)
-{
- ns->vfconfigs = kcalloc(num_vfs, sizeof(struct nsim_vf_config),
- GFP_KERNEL);
- if (!ns->vfconfigs)
- return -ENOMEM;
- ns->num_vfs = num_vfs;
-
- return 0;
-}
-
-static void nsim_vfs_disable(struct netdevsim *ns)
-{
- kfree(ns->vfconfigs);
- ns->vfconfigs = NULL;
- ns->num_vfs = 0;
-}
-
-static ssize_t
-nsim_numvfs_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct netdevsim *ns = to_nsim(dev);
- unsigned int num_vfs;
- int ret;
-
- ret = kstrtouint(buf, 0, &num_vfs);
- if (ret)
- return ret;
-
- rtnl_lock();
- if (ns->num_vfs == num_vfs)
- goto exit_good;
- if (ns->num_vfs && num_vfs) {
- ret = -EBUSY;
- goto exit_unlock;
- }
-
- if (num_vfs) {
- ret = nsim_vfs_enable(ns, num_vfs);
- if (ret)
- goto exit_unlock;
- } else {
- nsim_vfs_disable(ns);
- }
-exit_good:
- ret = count;
-exit_unlock:
- rtnl_unlock();
-
- return ret;
-}
-
-static ssize_t
-nsim_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct netdevsim *ns = to_nsim(dev);
-
- return sprintf(buf, "%u\n", ns->num_vfs);
-}
-
-static struct device_attribute nsim_numvfs_attr =
- __ATTR(sriov_numvfs, 0664, nsim_numvfs_show, nsim_numvfs_store);
-
-static struct attribute *nsim_dev_attrs[] = {
- &nsim_numvfs_attr.attr,
- NULL,
-};
-
-static const struct attribute_group nsim_dev_attr_group = {
- .attrs = nsim_dev_attrs,
-};
-
-static const struct attribute_group *nsim_dev_attr_groups[] = {
- &nsim_dev_attr_group,
- NULL,
-};
-
-static void nsim_dev_release(struct device *dev)
-{
- struct netdevsim *ns = to_nsim(dev);
-
- nsim_vfs_disable(ns);
- free_netdev(ns->netdev);
-}
-
-static struct device_type nsim_dev_type = {
- .groups = nsim_dev_attr_groups,
- .release = nsim_dev_release,
-};
-
-static int nsim_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- ppid->id_len = sizeof(ns->sdev->switch_id);
- memcpy(&ppid->id, &ns->sdev->switch_id, ppid->id_len);
- return 0;
-}
-
-static int nsim_init(struct net_device *dev)
-{
- char sdev_ddir_name[10], sdev_link_name[32];
- struct netdevsim *ns = netdev_priv(dev);
- int err;
-
- ns->netdev = dev;
- ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
- if (IS_ERR_OR_NULL(ns->ddir))
- return -ENOMEM;
-
- if (!ns->sdev) {
- ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL);
- if (!ns->sdev) {
- err = -ENOMEM;
- goto err_debugfs_destroy;
- }
- ns->sdev->refcnt = 1;
- ns->sdev->switch_id = nsim_dev_id;
- sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
- ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name,
- nsim_sdev_ddir);
- if (IS_ERR_OR_NULL(ns->sdev->ddir)) {
- err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL;
- goto err_sdev_free;
- }
- } else {
- sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
- ns->sdev->refcnt++;
- }
-
- sprintf(sdev_link_name, "../../" DRV_NAME "_sdev/%s", sdev_ddir_name);
- debugfs_create_symlink("sdev", ns->ddir, sdev_link_name);
-
- err = nsim_bpf_init(ns);
- if (err)
- goto err_sdev_destroy;
-
- ns->dev.id = nsim_dev_id++;
- ns->dev.bus = &nsim_bus;
- ns->dev.type = &nsim_dev_type;
- err = device_register(&ns->dev);
- if (err)
- goto err_bpf_uninit;
-
- SET_NETDEV_DEV(dev, &ns->dev);
-
- err = nsim_devlink_setup(ns);
- if (err)
- goto err_unreg_dev;
-
- nsim_ipsec_init(ns);
-
- return 0;
-
-err_unreg_dev:
- device_unregister(&ns->dev);
-err_bpf_uninit:
- nsim_bpf_uninit(ns);
-err_sdev_destroy:
- if (!--ns->sdev->refcnt) {
- debugfs_remove_recursive(ns->sdev->ddir);
-err_sdev_free:
- kfree(ns->sdev);
- }
-err_debugfs_destroy:
- debugfs_remove_recursive(ns->ddir);
- return err;
-}
-
-static void nsim_uninit(struct net_device *dev)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- nsim_ipsec_teardown(ns);
- nsim_devlink_teardown(ns);
- debugfs_remove_recursive(ns->ddir);
- nsim_bpf_uninit(ns);
- if (!--ns->sdev->refcnt) {
- debugfs_remove_recursive(ns->sdev->ddir);
- kfree(ns->sdev);
- }
-}
-
-static void nsim_free(struct net_device *dev)
-{
- struct netdevsim *ns = netdev_priv(dev);
-
- device_unregister(&ns->dev);
- /* netdev and vf state will be freed out of device_release() */
-}
-
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -325,11 +101,12 @@ nsim_setup_tc_block(struct net_device *dev, struct tc_block_offload *f)
static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
/* Only refuse multicast addresses, zero address can mean unset/any. */
- if (vf >= ns->num_vfs || is_multicast_ether_addr(mac))
+ if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac))
return -EINVAL;
- memcpy(ns->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+ memcpy(nsim_bus_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
return 0;
}
@@ -338,13 +115,14 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf,
u16 vlan, u8 qos, __be16 vlan_proto)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7)
return -EINVAL;
- ns->vfconfigs[vf].vlan = vlan;
- ns->vfconfigs[vf].qos = qos;
- ns->vfconfigs[vf].vlan_proto = vlan_proto;
+ nsim_bus_dev->vfconfigs[vf].vlan = vlan;
+ nsim_bus_dev->vfconfigs[vf].qos = qos;
+ nsim_bus_dev->vfconfigs[vf].vlan_proto = vlan_proto;
return 0;
}
@@ -352,12 +130,13 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf,
static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].min_tx_rate = min;
- ns->vfconfigs[vf].max_tx_rate = max;
+ nsim_bus_dev->vfconfigs[vf].min_tx_rate = min;
+ nsim_bus_dev->vfconfigs[vf].max_tx_rate = max;
return 0;
}
@@ -365,10 +144,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].spoofchk_enabled = val;
+ nsim_bus_dev->vfconfigs[vf].spoofchk_enabled = val;
return 0;
}
@@ -376,10 +156,11 @@ static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].rss_query_enabled = val;
+ nsim_bus_dev->vfconfigs[vf].rss_query_enabled = val;
return 0;
}
@@ -387,10 +168,11 @@ static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
- ns->vfconfigs[vf].trusted = val;
+ nsim_bus_dev->vfconfigs[vf].trusted = val;
return 0;
}
@@ -399,21 +181,22 @@ static int
nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
ivi->vf = vf;
- ivi->linkstate = ns->vfconfigs[vf].link_state;
- ivi->min_tx_rate = ns->vfconfigs[vf].min_tx_rate;
- ivi->max_tx_rate = ns->vfconfigs[vf].max_tx_rate;
- ivi->vlan = ns->vfconfigs[vf].vlan;
- ivi->vlan_proto = ns->vfconfigs[vf].vlan_proto;
- ivi->qos = ns->vfconfigs[vf].qos;
- memcpy(&ivi->mac, ns->vfconfigs[vf].vf_mac, ETH_ALEN);
- ivi->spoofchk = ns->vfconfigs[vf].spoofchk_enabled;
- ivi->trusted = ns->vfconfigs[vf].trusted;
- ivi->rss_query_en = ns->vfconfigs[vf].rss_query_enabled;
+ ivi->linkstate = nsim_bus_dev->vfconfigs[vf].link_state;
+ ivi->min_tx_rate = nsim_bus_dev->vfconfigs[vf].min_tx_rate;
+ ivi->max_tx_rate = nsim_bus_dev->vfconfigs[vf].max_tx_rate;
+ ivi->vlan = nsim_bus_dev->vfconfigs[vf].vlan;
+ ivi->vlan_proto = nsim_bus_dev->vfconfigs[vf].vlan_proto;
+ ivi->qos = nsim_bus_dev->vfconfigs[vf].qos;
+ memcpy(&ivi->mac, nsim_bus_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
+ ivi->spoofchk = nsim_bus_dev->vfconfigs[vf].spoofchk_enabled;
+ ivi->trusted = nsim_bus_dev->vfconfigs[vf].trusted;
+ ivi->rss_query_en = nsim_bus_dev->vfconfigs[vf].rss_query_enabled;
return 0;
}
@@ -421,8 +204,9 @@ nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
- if (vf >= ns->num_vfs)
+ if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
switch (state) {
@@ -434,7 +218,7 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
return -EINVAL;
}
- ns->vfconfigs[vf].link_state = state;
+ nsim_bus_dev->vfconfigs[vf].link_state = state;
return 0;
}
@@ -461,9 +245,14 @@ nsim_set_features(struct net_device *dev, netdev_features_t features)
return 0;
}
+static struct devlink_port *nsim_get_devlink_port(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ return &ns->nsim_dev_port->devlink_port;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
- .ndo_init = nsim_init,
- .ndo_uninit = nsim_uninit,
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
@@ -481,7 +270,7 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_setup_tc = nsim_setup_tc,
.ndo_set_features = nsim_set_features,
.ndo_bpf = nsim_bpf,
- .ndo_get_port_parent_id = nsim_get_port_parent_id,
+ .ndo_get_devlink_port = nsim_get_devlink_port,
};
static void nsim_setup(struct net_device *dev)
@@ -489,9 +278,6 @@ static void nsim_setup(struct net_device *dev)
ether_setup(dev);
eth_hw_addr_random(dev);
- dev->netdev_ops = &nsim_netdev_ops;
- dev->priv_destructor = nsim_free;
-
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
@@ -506,104 +292,102 @@ static void nsim_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
}
-static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+struct netdevsim *
+nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
{
- if (tb[IFLA_ADDRESS]) {
- if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
- return -EINVAL;
- if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
- return -EADDRNOTAVAIL;
- }
- return 0;
+ struct net_device *dev;
+ struct netdevsim *ns;
+ int err;
+
+ dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ ns = netdev_priv(dev);
+ ns->netdev = dev;
+ ns->nsim_dev = nsim_dev;
+ ns->nsim_dev_port = nsim_dev_port;
+ ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
+ dev->netdev_ops = &nsim_netdev_ops;
+
+ rtnl_lock();
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_free_netdev;
+
+ nsim_ipsec_init(ns);
+
+ err = register_netdevice(dev);
+ if (err)
+ goto err_ipsec_teardown;
+ rtnl_unlock();
+
+ return ns;
+
+err_ipsec_teardown:
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ rtnl_unlock();
+err_free_netdev:
+ free_netdev(dev);
+ return ERR_PTR(err);
}
-static int nsim_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
+void nsim_destroy(struct netdevsim *ns)
{
- struct netdevsim *ns = netdev_priv(dev);
-
- if (tb[IFLA_LINK]) {
- struct net_device *joindev;
- struct netdevsim *joinns;
-
- joindev = __dev_get_by_index(src_net,
- nla_get_u32(tb[IFLA_LINK]));
- if (!joindev)
- return -ENODEV;
- if (joindev->netdev_ops != &nsim_netdev_ops)
- return -EINVAL;
-
- joinns = netdev_priv(joindev);
- if (!joinns->sdev || !joinns->sdev->refcnt)
- return -EINVAL;
- ns->sdev = joinns->sdev;
- }
+ struct net_device *dev = ns->netdev;
- return register_netdevice(dev);
+ rtnl_lock();
+ unregister_netdevice(dev);
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ rtnl_unlock();
+ free_netdev(dev);
}
-static void nsim_dellink(struct net_device *dev, struct list_head *head)
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
{
- unregister_netdevice_queue(dev, head);
+ NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device");
+ return -EOPNOTSUPP;
}
static struct rtnl_link_ops nsim_link_ops __read_mostly = {
.kind = DRV_NAME,
- .priv_size = sizeof(struct netdevsim),
- .setup = nsim_setup,
.validate = nsim_validate,
- .newlink = nsim_newlink,
- .dellink = nsim_dellink,
};
static int __init nsim_module_init(void)
{
int err;
- nsim_ddir = debugfs_create_dir(DRV_NAME, NULL);
- if (IS_ERR_OR_NULL(nsim_ddir))
- return -ENOMEM;
-
- nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
- if (IS_ERR_OR_NULL(nsim_sdev_ddir)) {
- err = -ENOMEM;
- goto err_debugfs_destroy;
- }
-
- err = bus_register(&nsim_bus);
+ err = nsim_dev_init();
if (err)
- goto err_sdir_destroy;
+ return err;
- err = nsim_devlink_init();
+ err = nsim_bus_init();
if (err)
- goto err_unreg_bus;
+ goto err_dev_exit;
err = rtnl_link_register(&nsim_link_ops);
if (err)
- goto err_dl_fini;
+ goto err_bus_exit;
return 0;
-err_dl_fini:
- nsim_devlink_exit();
-err_unreg_bus:
- bus_unregister(&nsim_bus);
-err_sdir_destroy:
- debugfs_remove_recursive(nsim_sdev_ddir);
-err_debugfs_destroy:
- debugfs_remove_recursive(nsim_ddir);
+err_bus_exit:
+ nsim_bus_exit();
+err_dev_exit:
+ nsim_dev_exit();
return err;
}
static void __exit nsim_module_exit(void)
{
rtnl_link_unregister(&nsim_link_ops);
- nsim_devlink_exit();
- bus_unregister(&nsim_bus);
- debugfs_remove_recursive(nsim_sdev_ddir);
- debugfs_remove_recursive(nsim_ddir);
+ nsim_bus_exit();
+ nsim_dev_exit();
}
module_init(nsim_module_init);
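
With rtnetlink creation rejected by nsim_validate() above, netdevsim devices are now created through the bus interface named in that error message. The snippet below is a minimal userspace sketch based only on the "[ID] [PORT_COUNT]" format quoted there; device ID 10 and a single port are arbitrary example values, not values mandated by the driver.

/* Hedged sketch: create a netdevsim device via the new bus interface.
 * The "[ID] [PORT_COUNT]" format comes from the nsim_validate() message;
 * the ID and port count used here are arbitrary.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/netdevsim/new_device";
	const char *cmd = "10 1";		/* device ID 10, one port */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
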
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 384c254fafc5..3f398797c2bc 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -18,6 +18,7 @@
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/devlink.h>
#include <net/xdp.h>
#define DRV_NAME "netdevsim"
@@ -26,26 +27,6 @@
#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
-struct bpf_prog;
-struct bpf_offload_dev;
-struct dentry;
-struct nsim_vf_config;
-
-struct netdevsim_shared_dev {
- unsigned int refcnt;
- u32 switch_id;
-
- struct dentry *ddir;
-
- struct bpf_offload_dev *bpf_dev;
-
- struct dentry *ddir_bpf_bound_progs;
- u32 prog_id_gen;
-
- struct list_head bpf_bound_progs;
- struct list_head bpf_bound_maps;
-};
-
#define NSIM_IPSEC_MAX_SA_COUNT 33
#define NSIM_IPSEC_VALID BIT(31)
@@ -69,18 +50,14 @@ struct nsim_ipsec {
struct netdevsim {
struct net_device *netdev;
+ struct nsim_dev *nsim_dev;
+ struct nsim_dev_port *nsim_dev_port;
u64 tx_packets;
u64 tx_bytes;
struct u64_stats_sync syncp;
- struct device dev;
- struct netdevsim_shared_dev *sdev;
-
- struct dentry *ddir;
-
- unsigned int num_vfs;
- struct nsim_vf_config *vfconfigs;
+ struct nsim_bus_dev *nsim_bus_dev;
struct bpf_prog *bpf_offloaded;
u32 bpf_offloaded_id;
@@ -88,22 +65,22 @@ struct netdevsim {
struct xdp_attachment_info xdp;
struct xdp_attachment_info xdp_hw;
- bool bpf_bind_accept;
- u32 bpf_bind_verifier_delay;
-
bool bpf_tc_accept;
bool bpf_tc_non_bound_accept;
bool bpf_xdpdrv_accept;
bool bpf_xdpoffload_accept;
bool bpf_map_accept;
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
- struct devlink *devlink;
-#endif
struct nsim_ipsec ipsec;
};
+struct netdevsim *
+nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
+void nsim_destroy(struct netdevsim *ns);
+
#ifdef CONFIG_BPF_SYSCALL
+int nsim_bpf_dev_init(struct nsim_dev *nsim_dev);
+void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev);
int nsim_bpf_init(struct netdevsim *ns);
void nsim_bpf_uninit(struct netdevsim *ns);
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
@@ -111,6 +88,15 @@ int nsim_bpf_disable_tc(struct netdevsim *ns);
int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv);
#else
+
+static inline int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
+{
+ return 0;
+}
+
+static inline void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
+{
+}
static inline int nsim_bpf_init(struct netdevsim *ns)
{
return 0;
@@ -138,7 +124,6 @@ nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
#endif
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
enum nsim_resource_id {
NSIM_RESOURCE_NONE, /* DEVLINK_RESOURCE_ID_PARENT_TOP */
NSIM_RESOURCE_IPV4,
@@ -149,36 +134,47 @@ enum nsim_resource_id {
NSIM_RESOURCE_IPV6_FIB_RULES,
};
-int nsim_devlink_setup(struct netdevsim *ns);
-void nsim_devlink_teardown(struct netdevsim *ns);
+struct nsim_dev_port {
+ struct list_head list;
+ struct devlink_port devlink_port;
+ unsigned int port_index;
+ struct dentry *ddir;
+ struct netdevsim *ns;
+};
-int nsim_devlink_init(void);
-void nsim_devlink_exit(void);
+struct nsim_dev {
+ struct nsim_bus_dev *nsim_bus_dev;
+ struct nsim_fib_data *fib_data;
+ struct dentry *ddir;
+ struct dentry *ports_ddir;
+ struct bpf_offload_dev *bpf_dev;
+ bool bpf_bind_accept;
+ u32 bpf_bind_verifier_delay;
+ struct dentry *ddir_bpf_bound_progs;
+ u32 prog_id_gen;
+ struct list_head bpf_bound_progs;
+ struct list_head bpf_bound_maps;
+ struct netdev_phys_item_id switch_id;
+ struct list_head port_list;
+ struct mutex port_list_lock; /* protects port list */
+};
-int nsim_fib_init(void);
-void nsim_fib_exit(void);
-u64 nsim_fib_get_val(struct net *net, enum nsim_resource_id res_id, bool max);
-int nsim_fib_set_max(struct net *net, enum nsim_resource_id res_id, u64 val,
+int nsim_dev_init(void);
+void nsim_dev_exit(void);
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev);
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+
+struct nsim_fib_data *nsim_fib_create(void);
+void nsim_fib_destroy(struct nsim_fib_data *fib_data);
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max);
+int nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val,
struct netlink_ext_ack *extack);
-#else
-static inline int nsim_devlink_setup(struct netdevsim *ns)
-{
- return 0;
-}
-
-static inline void nsim_devlink_teardown(struct netdevsim *ns)
-{
-}
-
-static inline int nsim_devlink_init(void)
-{
- return 0;
-}
-
-static inline void nsim_devlink_exit(void)
-{
-}
-#endif
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
@@ -199,7 +195,26 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
}
#endif
-static inline struct netdevsim *to_nsim(struct device *ptr)
-{
- return container_of(ptr, struct netdevsim, dev);
-}
+struct nsim_vf_config {
+ int link_state;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u16 vlan;
+ __be16 vlan_proto;
+ u16 qos;
+ u8 vf_mac[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool trusted;
+ bool rss_query_enabled;
+};
+
+struct nsim_bus_dev {
+ struct device dev;
+ struct list_head list;
+ unsigned int port_count;
+ unsigned int num_vfs;
+ struct nsim_vf_config *vfconfigs;
+};
+
+int nsim_bus_init(void);
+void nsim_bus_exit(void);
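
To make the new split easier to follow, here is a hedged sketch of how a port-add path might pair nsim_create() with nsim_destroy() using the structures declared above. Only the fields and the two create/destroy prototypes are taken from this header; the helper names and the exact locking are illustrative assumptions, not the driver's actual nsim_dev_port_add().

/* Hedged sketch built against netdevsim.h as patched above; the function
 * bodies are illustrative, not the real port add/del implementation.
 */
#include <linux/err.h>
#include <linux/slab.h>

#include "netdevsim.h"

static int example_port_add(struct nsim_dev *nsim_dev, unsigned int port_index)
{
	struct nsim_dev_port *nsim_dev_port;
	struct netdevsim *ns;

	nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
	if (!nsim_dev_port)
		return -ENOMEM;
	nsim_dev_port->port_index = port_index;

	ns = nsim_create(nsim_dev, nsim_dev_port);	/* allocates and registers the netdev */
	if (IS_ERR(ns)) {
		kfree(nsim_dev_port);
		return PTR_ERR(ns);
	}
	nsim_dev_port->ns = ns;

	mutex_lock(&nsim_dev->port_list_lock);
	list_add(&nsim_dev_port->list, &nsim_dev->port_list);
	mutex_unlock(&nsim_dev->port_list_lock);
	return 0;
}

static void example_port_del(struct nsim_dev *nsim_dev,
			     struct nsim_dev_port *nsim_dev_port)
{
	mutex_lock(&nsim_dev->port_list_lock);
	list_del(&nsim_dev_port->list);
	mutex_unlock(&nsim_dev->port_list_lock);
	nsim_destroy(nsim_dev_port->ns);		/* unregisters and frees the netdev */
	kfree(nsim_dev_port);
}
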
diff --git a/drivers/net/netdevsim/sdev.c b/drivers/net/netdevsim/sdev.c
new file mode 100644
index 000000000000..6712da3340d6
--- /dev/null
+++ b/drivers/net/netdevsim/sdev.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static struct dentry *nsim_sdev_ddir;
+
+static u32 nsim_sdev_id;
+
+struct netdevsim_shared_dev *nsim_sdev_get(struct netdevsim *joinns)
+{
+ struct netdevsim_shared_dev *sdev;
+ char sdev_ddir_name[10];
+ int err;
+
+ if (joinns) {
+ if (WARN_ON(!joinns->sdev))
+ return ERR_PTR(-EINVAL);
+ sdev = joinns->sdev;
+ sdev->refcnt++;
+ return sdev;
+ }
+
+ sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
+ if (!sdev)
+ return ERR_PTR(-ENOMEM);
+ sdev->refcnt = 1;
+ sdev->switch_id = nsim_sdev_id++;
+
+ sprintf(sdev_ddir_name, "%u", sdev->switch_id);
+ sdev->ddir = debugfs_create_dir(sdev_ddir_name, nsim_sdev_ddir);
+ if (IS_ERR_OR_NULL(sdev->ddir)) {
+ err = PTR_ERR_OR_ZERO(sdev->ddir) ?: -EINVAL;
+ goto err_sdev_free;
+ }
+
+ return sdev;
+
+err_sdev_free:
+ nsim_sdev_id--;
+ kfree(sdev);
+ return ERR_PTR(err);
+}
+
+void nsim_sdev_put(struct netdevsim_shared_dev *sdev)
+{
+ if (--sdev->refcnt)
+ return;
+ debugfs_remove_recursive(sdev->ddir);
+ kfree(sdev);
+}
+
+int nsim_sdev_init(void)
+{
+ nsim_sdev_ddir = debugfs_create_dir(DRV_NAME "_sdev", NULL);
+ if (IS_ERR_OR_NULL(nsim_sdev_ddir))
+ return -ENOMEM;
+ return 0;
+}
+
+void nsim_sdev_exit(void)
+{
+ debugfs_remove_recursive(nsim_sdev_ddir);
+}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 520657945b82..d6299710d634 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -76,6 +76,17 @@ config MDIO_BUS_MUX_GPIO
several child MDIO busses to a parent bus. Child bus
selection is under the control of GPIO lines.
+config MDIO_BUS_MUX_MESON_G12A
+ tristate "Amlogic G12a based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the amlogic g12a SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
config MDIO_BUS_MUX_MMIOREG
tristate "MMIO device-controlled MDIO bus multiplexers"
depends on OF_MDIO && HAS_IOMEM
@@ -273,13 +284,13 @@ config BCM87XX_PHY
Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
config BCM_CYGNUS_PHY
- tristate "Broadcom Cygnus SoC internal PHY"
- depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ tristate "Broadcom Cygnus/Omega SoC internal PHY"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on MDIO_BCM_IPROC
select BCM_NET_PHYLIB
---help---
This PHY driver is for the 1G internal PHYs of the Broadcom
- Cygnus Family SoC.
+ Cygnus and Omega Family SoC.
Currently supports internal PHY's used in the BCM11300,
BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
@@ -397,7 +408,7 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
---help---
- Currently supports VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
+ Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index ece5dae67174..27d7f9f3b0de 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 65b4b0960b1e..eef35f8c8d45 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -60,7 +60,7 @@ static struct phy_driver am79c_driver[] = { {
.phy_id = PHY_ID_AM79C874,
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = am79c_config_init,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
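
This driver, and the many below that receive the same treatment, keeps only a comment where .features used to be set. The sketch spells out the assumption behind that convention: with .features left NULL, the phylib core is expected to determine the supported link modes at probe time. Only drv->features and drv->get_features appear in this diff; the surrounding control flow is an assumption about the core and may differ from the tree you are reading.

/* Hedged sketch of the assumed feature-detection fallback in the phylib
 * core when a driver leaves .features unset. Helper names other than the
 * two callbacks removed/kept in this diff are assumptions.
 */
#include <linux/phy.h>

static int example_probe_features(struct phy_device *phydev)
{
	const struct phy_driver *drv = phydev->drv;

	if (drv->features) {
		/* legacy static feature table, as removed above */
		linkmode_copy(phydev->supported, drv->features);
		return 0;
	}
	if (drv->get_features)
		return drv->get_features(phydev);
	/* assumed dynamic detection from the PHY's own ability registers */
	return phydev->is_c45 ? genphy_c45_pma_read_abilities(phydev)
			      : genphy_read_abilities(phydev);
}
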
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 37218e5d7cc9..eed4fe3d871f 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/bitfield.h>
#include <linux/phy.h>
#include "aquantia.h"
@@ -22,20 +23,33 @@
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
+
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
#define MDIO_AN_TX_VEND_STATUS1 0xc800
-#define MDIO_AN_TX_VEND_STATUS1_10BASET (0x0 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_100BASETX (0x1 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_1000BASET (0x2 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_10GBASET (0x3 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_2500BASET (0x4 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_5000BASET (0x5 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK (0x7 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK GENMASK(3, 1)
+#define MDIO_AN_TX_VEND_STATUS1_10BASET 0
+#define MDIO_AN_TX_VEND_STATUS1_100BASETX 1
+#define MDIO_AN_TX_VEND_STATUS1_1000BASET 2
+#define MDIO_AN_TX_VEND_STATUS1_10GBASET 3
+#define MDIO_AN_TX_VEND_STATUS1_2500BASET 4
+#define MDIO_AN_TX_VEND_STATUS1_5000BASET 5
#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0)
+#define MDIO_AN_TX_VEND_INT_STATUS1 0xcc00
+#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
+
#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
@@ -44,8 +58,42 @@
#define MDIO_AN_RX_LP_STAT1 0xe820
#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15)
#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14)
+#define MDIO_AN_RX_LP_STAT1_SHORT_REACH BIT(13)
+#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT BIT(12)
+#define MDIO_AN_RX_LP_STAT1_AQ_PHY BIT(2)
+
+#define MDIO_AN_RX_LP_STAT4 0xe823
+#define MDIO_AN_RX_LP_STAT4_FW_MAJOR GENMASK(15, 8)
+#define MDIO_AN_RX_LP_STAT4_FW_MINOR GENMASK(7, 0)
+
+#define MDIO_AN_RX_VEND_STAT3 0xe832
+#define MDIO_AN_RX_VEND_STAT3_AFR BIT(0)
+
+/* MDIO_MMD_C22EXT */
+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_FW_ID 0x0020
+#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT1 0xc885
+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
+#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+
#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
@@ -72,6 +120,88 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+ int size;
+};
+
+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+ SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
+ SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
+};
+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+
+struct aqr107_priv {
+ u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+};
+
+static int aqr107_get_sset_count(struct phy_device *phydev)
+{
+ return AQR107_SGMII_STAT_SZ;
+}
+
+static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
+ strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static u64 aqr107_get_stat(struct phy_device *phydev, int index)
+{
+ const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
+ int len_l = min(stat->size, 16);
+ int len_h = stat->size - len_l;
+ u64 ret;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
+ if (val < 0)
+ return U64_MAX;
+
+ ret = val & GENMASK(len_l - 1, 0);
+ if (len_h) {
+ val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
+ if (val < 0)
+ return U64_MAX;
+
+ ret += (val & GENMASK(len_h - 1, 0)) << 16;
+ }
+
+ return ret;
+}
+
+static void aqr107_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ u64 val;
+ int i;
+
+ for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
+ val = aqr107_get_stat(phydev, i);
+ if (val == U64_MAX)
+ phydev_err(phydev, "Reading HW Statistics failed for %s\n",
+ aqr107_hw_stats[i].name);
+ else
+ priv->sgmii_stats[i] += val;
+
+ data[i] = priv->sgmii_stats[i];
+ }
+}
+
static int aqr_config_aneg(struct phy_device *phydev)
{
bool changed = false;
@@ -112,41 +242,22 @@ static int aqr_config_aneg(struct phy_device *phydev)
static int aqr_config_intr(struct phy_device *phydev)
{
+ bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_write_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_MASK2,
- MDIO_AN_TX_VEND_INT_MASK2_LINK);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_STD_MASK,
- VEND1_GLOBAL_INT_STD_MASK_ALL);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_VEND_MASK,
- VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
- VEND1_GLOBAL_INT_VEND_MASK_AN);
- } else {
- err = phy_write_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_MASK2, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_STD_MASK, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_VEND_MASK, 0);
- }
+ err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
+ en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
+ en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
+ if (err < 0)
+ return err;
- return err;
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+ en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
}
static int aqr_ack_interrupt(struct phy_device *phydev)
@@ -178,21 +289,315 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
+static int aqr107_read_downshift_event(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS1);
+ if (val < 0)
+ return val;
+
+ return !!(val & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT);
+}
+
+static int aqr107_read_rate(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
+ case MDIO_AN_TX_VEND_STATUS1_10BASET:
+ phydev->speed = SPEED_10;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_100BASETX:
+ phydev->speed = SPEED_100;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_1000BASET:
+ phydev->speed = SPEED_1000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_2500BASET:
+ phydev->speed = SPEED_2500;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_5000BASET:
+ phydev->speed = SPEED_5000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_10GBASET:
+ phydev->speed = SPEED_10000;
+ break;
+ default:
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ return 0;
+}
+
+static int aqr107_read_status(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = aqr_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ default:
+ phydev->interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ val = aqr107_read_downshift_event(phydev);
+ if (val <= 0)
+ return val;
+
+ phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
+
+ /* Read downshifted rate from vendor register */
+ return aqr107_read_rate(phydev);
+}
+
+static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+
+ *data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val = 0;
+
+ if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
+ return -E2BIG;
+
+ if (cnt != DOWNSHIFT_DEV_DISABLE) {
+ val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
+ MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+}
+
+static int aqr107_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return aqr107_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aqr107_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return aqr107_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* If we configure settings whilst firmware is still initializing the chip,
+ * then these settings may be overwritten. Therefore make sure chip
+ * initialization has completed, using the presence of the firmware ID
+ * as an indicator.
+ * The chip also provides a "reset completed" bit, but it's cleared after
+ * read. Therefore the function would time out if called again.
+ */
+static int aqr107_wait_reset_complete(struct phy_device *phydev)
+{
+ int val, retries = 100;
+
+ do {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (val < 0)
+ return val;
+ msleep(20);
+ } while (!val && --retries);
+
+ return val ? 0 : -ETIMEDOUT;
+}
+
+static void aqr107_chip_info(struct phy_device *phydev)
+{
+ u8 fw_major, fw_minor, build_id, prov_id;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (val < 0)
+ return;
+
+ fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
+ fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
+ if (val < 0)
+ return;
+
+ build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
+ prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
+
+ phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
+ fw_major, fw_minor, build_id, prov_id);
+}
+
+static int aqr107_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ return -ENODEV;
+
+ ret = aqr107_wait_reset_complete(phydev);
+ if (!ret)
+ aqr107_chip_info(phydev);
+
+ /* ensure that a latched downshift event is cleared */
+ aqr107_read_downshift_event(phydev);
+
+ return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
static int aqcs109_config_init(struct phy_device *phydev)
{
+ int ret;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
+ return -ENODEV;
+
+ ret = aqr107_wait_reset_complete(phydev);
+ if (!ret)
+ aqr107_chip_info(phydev);
+
/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
* PMA speed ability bits are the same for all members of the family,
* AQCS109 however supports speeds up to 2.5G only.
*/
- return phy_set_max_speed(phydev, SPEED_2500);
+ ret = phy_set_max_speed(phydev, SPEED_2500);
+ if (ret)
+ return ret;
+
+ /* ensure that a latched downshift event is cleared */
+ aqr107_read_downshift_event(phydev);
+
+ return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
+static void aqr107_link_change_notify(struct phy_device *phydev)
+{
+ u8 fw_major, fw_minor;
+ bool downshift, short_reach, afr;
+ int mode, val;
+
+ if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
+ return;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
+ /* call failed or link partner is not an Aquantia PHY */
+ if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
+ return;
+
+ short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
+ downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
+ if (val < 0)
+ return;
+
+ fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
+ fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
+ if (val < 0)
+ return;
+
+ afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
+
+ phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
+ fw_major, fw_minor,
+ short_reach ? ", short reach mode" : "",
+ downshift ? ", fast-retrain downshift advertised" : "",
+ afr ? ", fast reframe advertised" : "");
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
+ if (val < 0)
+ return;
+
+ mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
+ if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
+ phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
+}
+
+static int aqr107_suspend(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_resume(struct phy_device *phydev)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_probe(struct phy_device *phydev)
+{
+ phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+ sizeof(struct aqr107_priv), GFP_KERNEL);
+ if (!phydev->priv)
+ return -ENOMEM;
+
+ return aqr_hwmon_probe(phydev);
}
static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ1202),
.name = "Aquantia AQ1202",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -201,8 +606,6 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQ2104),
.name = "Aquantia AQ2104",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -211,8 +614,6 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR105),
.name = "Aquantia AQR105",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -221,8 +622,6 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
.name = "Aquantia AQR106",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
@@ -231,31 +630,42 @@ static struct phy_driver aqr_driver[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR107),
.name = "Aquantia AQR107",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
- .probe = aqr_hwmon_probe,
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
- .probe = aqr_hwmon_probe,
+ .probe = aqr107_probe,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
.name = "Aquantia AQR405",
- .aneg_done = genphy_c45_aneg_done,
- .get_features = genphy_c45_pma_read_abilities,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
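
One detail worth calling out from aqr107_get_stat() above: SGMII counters wider than 16 bits are split across two consecutive MDIO registers, with min(size, 16) bits in the low register and the remainder in the next one. The standalone program below reproduces that arithmetic with made-up register values so the reassembly is easy to check; it is a worked example, not driver code.

/* Worked example of the len_l/len_h reassembly in aqr107_get_stat().
 * The two register values are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int size = 26;				/* e.g. sgmii_rx_good_frames */
	int len_l = size < 16 ? size : 16;	/* bits held by the low register */
	int len_h = size - len_l;		/* bits held by the high register */
	uint16_t reg_lo = 0xbeef;		/* hypothetical read of stat->reg */
	uint16_t reg_hi = 0x03ff;		/* hypothetical read of stat->reg + 1 */
	uint64_t val;

	val = reg_lo & ((1u << len_l) - 1);	/* GENMASK(len_l - 1, 0) */
	if (len_h)
		val += (uint64_t)(reg_hi & ((1u << len_h) - 1)) << 16;

	printf("reassembled %d-bit counter: 0x%llx\n", size,
	       (unsigned long long)val);
	return 0;
}
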
diff --git a/drivers/net/phy/asix.c b/drivers/net/phy/asix.c
index f14ba5366b91..79bf7ef1fcfd 100644
--- a/drivers/net/phy/asix.c
+++ b/drivers/net/phy/asix.c
@@ -43,7 +43,7 @@ static struct phy_driver asix_driver[] = { {
.phy_id = PHY_ID_ASIX_AX88796B,
.name = "Asix Electronics AX88796B",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.soft_reset = asix_soft_reset,
} };
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f3e96191eb6f..222ccd9ecfce 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -324,8 +324,6 @@ static int at803x_config_intr(struct phy_device *phydev)
static void at803x_link_change_notify(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
-
/*
* Conduct a hardware reset for AT8030 every time a link loss is
* signalled. This is necessary to circumvent a hardware bug that
@@ -333,25 +331,19 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* in the FIFO. In such cases, the FIFO enters an error mode it
* cannot recover from by software.
*/
- if (phydev->state == PHY_NOLINK) {
- if (phydev->mdio.reset && !priv->phy_reset) {
- struct at803x_context context;
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset_gpio) {
+ struct at803x_context context;
- at803x_context_save(phydev, &context);
+ at803x_context_save(phydev, &context);
- phy_device_reset(phydev, 1);
- msleep(1);
- phy_device_reset(phydev, 0);
- msleep(1);
+ phy_device_reset(phydev, 1);
+ msleep(1);
+ phy_device_reset(phydev, 0);
+ msleep(1);
- at803x_context_restore(phydev, &context);
+ at803x_context_restore(phydev, &context);
- phydev_dbg(phydev, "%s(): phy was reset\n",
- __func__);
- priv->phy_reset = true;
- }
- } else {
- priv->phy_reset = false;
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
}
}
@@ -397,7 +389,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -412,7 +404,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -426,7 +418,7 @@ static struct phy_driver at803x_driver[] = {
.get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index ab8e12922bf9..9ccf28b0a04d 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -10,6 +10,10 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
+struct bcm_omega_phy_priv {
+ u64 *stats;
+};
+
/* Broadcom Cygnus Phy specific registers */
#define MII_BCM_CYGNUS_AFE_VDAC_ICTRL_0 0x91E5 /* VDAL Control register */
@@ -121,21 +125,162 @@ static int bcm_cygnus_resume(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int bcm_omega_config_init(struct phy_device *phydev)
+{
+ u8 count, rev;
+ int ret = 0;
+
+ rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+
+ pr_info_once("%s: %s PHY revision: 0x%02x\n",
+ phydev_name(phydev), phydev->drv->name, rev);
+
+ /* Dummy read to a register to work around an issue upon reset where the
+ * internal inverter may not allow the first MDIO transaction to pass
+ * the MDIO management controller and make us return 0xffff for such
+ * reads.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ switch (rev) {
+ case 0x00:
+ ret = bcm_phy_28nm_a0b0_afe_config_init(phydev);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_downshift_get(phydev, &count);
+ if (ret)
+ return ret;
+
+ /* Only enable EEE if Wirespeed/downshift is disabled */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return bcm_phy_enable_apd(phydev, true);
+}
+
+static int bcm_omega_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds coming out of suspend/resume */
+ ret = bcm_omega_config_init(phydev);
+ if (ret)
+ return ret;
+
+ /* 28nm Gigabit PHYs come out of reset without any half-duplex
+ * or "hub" compliant advertised mode, fix that. This does not
+ * cause any problems with the PHY library since genphy_config_aneg()
+ * gracefully handles auto-negotiated and forced modes.
+ */
+ return genphy_config_aneg(phydev);
+}
+
+static int bcm_omega_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm_phy_downshift_get(phydev, (u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm_omega_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna,
+ const void *data)
+{
+ u8 count = *(u8 *)data;
+ int ret;
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ ret = bcm_phy_downshift_set(phydev, count);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Disable EEE advertisement since this prevents the PHY
+ * from successfully linking up, trigger auto-negotiation restart
+ * to let the MAC decide what to do.
+ */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static void bcm_omega_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_omega_phy_priv *priv = phydev->priv;
+
+ bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
+static int bcm_omega_probe(struct phy_device *phydev)
+{
+ struct bcm_omega_phy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->stats = devm_kcalloc(&phydev->mdio.dev,
+ bcm_phy_get_sset_count(phydev), sizeof(u64),
+ GFP_KERNEL);
+ if (!priv->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
static struct phy_driver bcm_cygnus_phy_driver[] = {
{
.phy_id = PHY_ID_BCM_CYGNUS,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom Cygnus PHY",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm_cygnus_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
.suspend = genphy_suspend,
.resume = bcm_cygnus_resume,
-} };
+}, {
+ .phy_id = PHY_ID_BCM_OMEGA,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom Omega Combo GPHY",
+ /* PHY_GBIT_FEATURES */
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm_omega_config_init,
+ .suspend = genphy_suspend,
+ .resume = bcm_omega_resume,
+ .get_tunable = bcm_omega_get_tunable,
+ .set_tunable = bcm_omega_set_tunable,
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm_omega_get_phy_stats,
+ .probe = bcm_omega_probe,
+}
+};
static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
+ { PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
};
MODULE_DEVICE_TABLE(mdio, bcm_cygnus_phy_tbl);
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index a75642051b8b..e0d3310957ff 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -371,6 +371,58 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
}
EXPORT_SYMBOL_GPL(bcm_phy_get_stats);
+void bcm_phy_r_rc_cal_reset(struct phy_device *phydev)
+{
+ /* Reset R_CAL/RC_CAL Engine */
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_AL/RC_CAL Engine */
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_r_rc_cal_reset);
+
+int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
+{
+ /* Increase VCO range to prevent unlocking problem of PLL at low
+ * temp
+ */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+
+ /* Change Ki to 011 */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
+
+ bcm_phy_r_rc_cal_reset(phydev);
+
+ /* write AFE_RXCONFIG_0 */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+
+ /* write AFTE_TX_CONFIG */
+ bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 17faaefcfd60..5ecacb4e64f0 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -9,6 +9,24 @@
#include <linux/brcmphy.h>
#include <linux/phy.h>
+/* 28nm only register definitions */
+#define MISC_ADDR(base, channel) base, channel
+
+#define DSP_TAP10 MISC_ADDR(0x0a, 0)
+#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
+#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
+#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
+
+#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
+#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
+#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
+#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
+#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
+#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+
+
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
@@ -45,5 +63,7 @@ int bcm_phy_get_sset_count(struct phy_device *phydev);
void bcm_phy_get_strings(struct phy_device *phydev, u8 *data);
void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
struct ethtool_stats *stats, u64 *data);
+void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
+int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 44e6cff419a0..23f1958ba6ad 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -64,7 +64,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id = 0x00406000,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
@@ -73,7 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b8415f8fae14..8fc33867e524 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -37,77 +37,10 @@
#define MII_BCM7XXX_SHD_3_TL4 0x23
#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1))
-/* 28nm only register definitions */
-#define MISC_ADDR(base, channel) base, channel
-
-#define DSP_TAP10 MISC_ADDR(0x0a, 0)
-#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
-#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
-#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
-
-#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
-#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
-#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
-#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
-#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
-#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
-#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
-#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
-
struct bcm7xxx_phy_priv {
u64 *stats;
};
-static void r_rc_cal_reset(struct phy_device *phydev)
-{
- /* Reset R_CAL/RC_CAL Engine */
- bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
-
- /* Disable Reset R_AL/RC_CAL Engine */
- bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
-}
-
-static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
-{
- /* Increase VCO range to prevent unlocking problem of PLL at low
- * temp
- */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
-
- /* Change Ki to 011 */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
-
- /* Disable loading of TVCO buffer to bandgap, set bandgap trim
- * to 111
- */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
-
- /* Adjust bias current trim by -3 */
- bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
-
- /* Switch to CORE_BASE1E */
- phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
-
- r_rc_cal_reset(phydev);
-
- /* write AFE_RXCONFIG_0 */
- bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
-
- /* write AFE_RXCONFIG_1 */
- bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
-
- /* write AFE_RX_LP_COUNTER */
- bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
-
- /* write AFE_HPF_TRIM_OTHERS */
- bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
-
- /* write AFTE_TX_CONFIG */
- bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
-
- return 0;
-}
-
static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
{
/* AFE_RXCONFIG_0 */
@@ -143,7 +76,7 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
/* Reset R_CAL/RC_CAL engine */
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -171,7 +104,7 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
/* Reset R_CAL/RC_CAL engine */
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -196,7 +129,7 @@ static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev)
/* Enable ffe zero detection for Vitesse interoperability */
bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015);
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -227,7 +160,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
switch (rev) {
case 0xa0:
case 0xb0:
- ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+ ret = bcm_phy_28nm_a0b0_afe_config_init(phydev);
break;
case 0xd0:
ret = bcm7xxx_28nm_d0_afe_config_init(phydev);
@@ -605,7 +538,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_GBIT_FEATURES, \
+ /* PHY_GBIT_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_config_init, \
.resume = bcm7xxx_28nm_resume, \
@@ -622,7 +555,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_28nm_ephy_config_init, \
.resume = bcm7xxx_28nm_ephy_resume, \
@@ -637,7 +570,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.phy_id = (_oui), \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
.flags = PHY_IS_INTERNAL, \
.config_init = bcm7xxx_config_init, \
.suspend = bcm7xxx_suspend, \
@@ -657,7 +590,6 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
- BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index cb86a3e90c7d..67fa05d67523 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -610,7 +610,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -618,7 +618,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -626,7 +626,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -634,7 +634,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -642,7 +642,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -650,7 +650,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -659,7 +659,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -667,7 +667,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -676,7 +676,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -685,7 +685,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
@@ -694,7 +694,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -702,7 +702,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -710,7 +710,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -718,7 +718,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -726,7 +726,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -735,7 +735,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5395",
.flags = PHY_IS_INTERNAL,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.get_sset_count = bcm_phy_get_sset_count,
.get_strings = bcm_phy_get_strings,
.get_stats = bcm53xx_phy_get_stats,
@@ -744,7 +744,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 108ed24f8489..9d1612a4d7e6 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,7 @@ static struct phy_driver cis820x_driver[] = {
.phy_id = 0x000fc410,
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
@@ -110,7 +110,7 @@ static struct phy_driver cis820x_driver[] = {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index bf39baa7f2c8..942f277463a4 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x0181b880,
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -153,7 +153,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x0181b8b0,
.name = "Davicom DM9161B/C",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -162,7 +162,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -171,7 +171,7 @@ static struct phy_driver dm91xx_driver[] = {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2fe2ebaf62d1..6580094161a9 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1514,7 +1514,7 @@ static struct phy_driver dp83640_driver = {
.phy_id = DP83640_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = dp83640_probe,
.remove = dp83640_remove,
.soft_reset = dp83640_soft_reset,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 97d45bd5b38e..7ed4760fb155 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -310,7 +310,7 @@ static int dp83822_resume(struct phy_device *phydev)
{ \
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
.soft_reset = dp83822_phy_reset, \
.config_init = dp83822_config_init, \
.get_wol = dp83822_get_wol, \
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index f55dc907c2f3..6f9bc7d91f17 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.phy_id = _id, \
.phy_id_mask = 0xfffffff0, \
.name = _name, \
- .features = PHY_BASIC_FEATURES, \
+ /* PHY_BASIC_FEATURES */ \
\
.soft_reset = genphy_soft_reset, \
.config_init = _config_init, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 8448d01819ef..fd35131a0c39 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -315,7 +315,7 @@ static struct phy_driver dp83867_driver[] = {
.phy_id = DP83867_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "TI DP83867",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index e9704af1d239..ac27da16824d 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -338,7 +338,7 @@ static struct phy_driver dp83811_driver[] = {
.phy_id = DP83TC811_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "TI DP83TC811",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = dp83811_config_init,
.config_aneg = dp83811_config_aneg,
.soft_reset = dp83811_phy_reset,
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 2aa367c04a8e..09e07b902d3a 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -86,7 +86,7 @@ static struct phy_driver et1011c_driver[] = { {
.phy_id = 0x0282f014,
.name = "ET1011C",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_aneg = et1011c_config_aneg,
.read_status = et1011c_read_status,
} };
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 1acd8bfdb3bc..3ffe46df249e 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -301,7 +301,7 @@ static struct phy_device *__fixed_phy_register(unsigned int irq,
phy->supported);
}
- linkmode_copy(phy->advertising, phy->supported);
+ phy_advertise_supported(phy);
ret = phy_device_register(phy);
if (ret) {
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index ebef8354bc81..d6e8516cd146 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -311,7 +311,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = &ip175c_config_init,
.config_aneg = &ip175c_config_aneg,
.read_status = &ip175c_read_status,
@@ -321,7 +321,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &ip1001_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -329,7 +329,7 @@ static struct phy_driver icplus_driver[] = {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = ip101a_g_probe,
.config_intr = ip101a_g_config_intr,
.did_interrupt = ip101a_g_did_interrupt,
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 02d9713318b6..b7875b36097f 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -232,7 +232,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_3,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -244,7 +244,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_3,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.3",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -256,7 +256,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_4,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -268,7 +268,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_4,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.4",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -280,7 +280,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_1_5,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -291,7 +291,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_1_5,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -302,7 +302,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_VR9_1_1,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.1 integrated)",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -313,7 +313,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_VR9_1_1,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.1 integrated)",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -324,7 +324,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY11G_VR9_1_2,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.2 integrated)",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -335,7 +335,7 @@ static struct phy_driver xway_gphy[] = {
.phy_id = PHY_ID_PHY22F_VR9_1_2,
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.2 integrated)",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index a93d673baf35..314486288119 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -251,7 +251,7 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id = 0x78100000,
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = lxt970_config_init,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
@@ -259,14 +259,14 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
.phy_id = 0x00137a10,
.name = "LXT973-A2",
.phy_id_mask = 0xffffffff,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
@@ -275,7 +275,7 @@ static struct phy_driver lxt97x_driver[] = {
.phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f76c4048b978..a7796134e3be 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -29,6 +29,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/io.h>
@@ -91,6 +92,14 @@
#define MII_88E1510_TEMP_SENSOR 0x1b
#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+#define MII_88E1540_COPPER_CTRL3 0x1a
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK GENMASK(11, 10)
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS 0
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS 1
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS 2
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS 3
+#define MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN BIT(9)
+
#define MII_88E6390_MISC_TEST 0x1b
#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
@@ -128,6 +137,7 @@
#define MII_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_DEF 0x0030
#define MII_88E1510_PHY_LED_DEF 0x1177
+#define MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE 0x1040
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -624,7 +634,10 @@ static void marvell_config_led(struct phy_device *phydev)
* LED[2] .. Blink, Activity
*/
case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
- def_config = MII_88E1510_PHY_LED_DEF;
+ if (phydev->dev_flags & MARVELL_PHY_LED0_LINK_LED1_ACTIVE)
+ def_config = MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE;
+ else
+ def_config = MII_88E1510_PHY_LED_DEF;
break;
default:
return;
@@ -1025,6 +1038,101 @@ static int m88e1145_config_init(struct phy_device *phydev)
return 0;
}
+static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
+{
+ int val;
+
+ val = phy_read(phydev, MII_88E1540_COPPER_CTRL3);
+ if (val < 0)
+ return val;
+
+ if (!(val & MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN)) {
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+ return 0;
+ }
+
+ val = FIELD_GET(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+
+ switch (val) {
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS:
+ *msecs = 0;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS:
+ *msecs = 10;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS:
+ *msecs = 20;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS:
+ *msecs = 40;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
+{
+ struct ethtool_eee eee;
+ int val, ret;
+
+ if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
+ return phy_clear_bits(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN);
+
+ /* According to the Marvell data sheet, EEE must be disabled for
+ * Fast Link Down detection to work properly.
+ */
+ ret = phy_ethtool_get_eee(phydev, &eee);
+ if (!ret && eee.eee_enabled) {
+ phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
+ return -EBUSY;
+ }
+
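+ /* Round the requested delay to the nearest supported step: 0, 10, 20 or 40 ms */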
+ if (*msecs <= 5)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS;
+ else if (*msecs <= 15)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS;
+ else if (*msecs <= 30)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS;
+ else
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS;
+
+ val = FIELD_PREP(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+
+ ret = phy_modify(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+ if (ret)
+ return ret;
+
+ return phy_set_bits(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN);
+}
+
+static int m88e1540_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return m88e1540_get_fld(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1540_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return m88e1540_set_fld(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/* The VOD can be out of specification on link up. Poke an
* undocumented register, in an undocumented page, with a magic value
* to fix this.
@@ -2024,7 +2132,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1101,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2042,7 +2150,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1112,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2060,7 +2168,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1111,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2079,7 +2187,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1118,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1118",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2097,7 +2205,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1121R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1121R",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = &m88e1121_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
@@ -2117,7 +2225,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1318S,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1318S",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
@@ -2139,7 +2247,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1145,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2158,7 +2266,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1149R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1149R",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2176,7 +2284,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1240,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2194,7 +2302,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1116R,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1116R",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
.ack_interrupt = &marvell_ack_interrupt,
@@ -2234,7 +2342,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E1540,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2249,13 +2357,15 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2274,7 +2384,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E3016,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E3016",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = marvell_probe,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
@@ -2294,7 +2404,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E6390,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e6390_config_aneg,
@@ -2309,6 +2419,8 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
},
};
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 100b401b1f4a..238a20e13d6a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -48,6 +48,8 @@ enum {
MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */
/* Vendor2 MMD registers */
+ MV_V2_PORT_CTRL = 0xf001,
+ MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
MV_V2_TEMP_CTRL_SAMPLE = 0x0000,
@@ -226,11 +228,19 @@ static int mv3310_probe(struct phy_device *phydev)
static int mv3310_suspend(struct phy_device *phydev)
{
- return 0;
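+ /* Power down the port via the Vendor2 port control register */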
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
}
static int mv3310_resume(struct phy_device *phydev)
{
+ int ret;
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+ if (ret)
+ return ret;
+
return mv3310_hwmon_config(phydev, true);
}
@@ -472,8 +482,9 @@ static struct phy_driver mv3310_drivers[] = {
.phy_id = MARVELL_PHY_ID_88E2110,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x2110",
- .get_features = genphy_c45_pma_read_abilities,
.probe = mv3310_probe,
+ .suspend = mv3310_suspend,
+ .resume = mv3310_resume,
.soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8295bc7c8c20..4a28fb29adaa 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -92,10 +92,7 @@ static int unimac_mdio_poll(void *wait_func_data)
usleep_range(1000, 2000);
} while (--timeout);
- if (!timeout)
- return -ETIMEDOUT;
-
- return 0;
+ return -ETIMEDOUT;
}
static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -292,7 +289,7 @@ static int unimac_mdio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base);
+ dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus\n");
return 0;
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/phy/mdio-mux-meson-g12a.c
new file mode 100644
index 000000000000..6fa29ea8e2a3
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-meson-g12a.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#define ETH_PLL_STS 0x40
+#define ETH_PLL_CTL0 0x44
+#define PLL_CTL0_LOCK_DIG BIT(30)
+#define PLL_CTL0_RST BIT(29)
+#define PLL_CTL0_EN BIT(28)
+#define PLL_CTL0_SEL BIT(23)
+#define PLL_CTL0_N GENMASK(14, 10)
+#define PLL_CTL0_M GENMASK(8, 0)
+#define PLL_LOCK_TIMEOUT 1000000
+#define PLL_MUX_NUM_PARENT 2
+#define ETH_PLL_CTL1 0x48
+#define ETH_PLL_CTL2 0x4c
+#define ETH_PLL_CTL3 0x50
+#define ETH_PLL_CTL4 0x54
+#define ETH_PLL_CTL5 0x58
+#define ETH_PLL_CTL6 0x5c
+#define ETH_PLL_CTL7 0x60
+
+#define ETH_PHY_CNTL0 0x80
+#define EPHY_G12A_ID 0x33000180
+#define ETH_PHY_CNTL1 0x84
+#define PHY_CNTL1_ST_MODE GENMASK(2, 0)
+#define PHY_CNTL1_ST_PHYADD GENMASK(7, 3)
+#define EPHY_DFLT_ADD 8
+#define PHY_CNTL1_MII_MODE GENMASK(15, 14)
+#define EPHY_MODE_RMII 0x1
+#define PHY_CNTL1_CLK_EN BIT(16)
+#define PHY_CNTL1_CLKFREQ BIT(17)
+#define PHY_CNTL1_PHY_ENB BIT(18)
+#define ETH_PHY_CNTL2 0x88
+#define PHY_CNTL2_USE_INTERNAL BIT(5)
+#define PHY_CNTL2_SMI_SRC_MAC BIT(6)
+#define PHY_CNTL2_RX_CLK_EPHY BIT(9)
+
+#define MESON_G12A_MDIO_EXTERNAL_ID 0
+#define MESON_G12A_MDIO_INTERNAL_ID 1
+
+struct g12a_mdio_mux {
+ bool pll_is_enabled;
+ void __iomem *regs;
+ void *mux_handle;
+ struct clk *pclk;
+ struct clk *pll;
+};
+
+struct g12a_ephy_pll {
+ void __iomem *base;
+ struct clk_hw hw;
+};
+
+#define g12a_ephy_pll_to_dev(_hw) \
+ container_of(_hw, struct g12a_ephy_pll, hw)
+
+static unsigned long g12a_ephy_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ u32 val, m, n;
+
+ val = readl(pll->base + ETH_PLL_CTL0);
+ m = FIELD_GET(PLL_CTL0_M, val);
+ n = FIELD_GET(PLL_CTL0_N, val);
+
+ return parent_rate * m / n;
+}
+
+static int g12a_ephy_pll_enable(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ u32 val = readl(pll->base + ETH_PLL_CTL0);
+
+ /* Apply both enable and reset */
+ val |= PLL_CTL0_RST | PLL_CTL0_EN;
+ writel(val, pll->base + ETH_PLL_CTL0);
+
+ /* Clear the reset to let PLL lock */
+ val &= ~PLL_CTL0_RST;
+ writel(val, pll->base + ETH_PLL_CTL0);
+
+ /* Poll on the digital lock instead of the usual analog lock.
+ * This is done because bit 31 is unreliable on some SoCs. Bit 31
+ * may indicate that the PLL is not locked even though the clock
+ * is actually running.
+ */
+ return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val,
+ val & PLL_CTL0_LOCK_DIG, 0, PLL_LOCK_TIMEOUT);
+}
+
+static void g12a_ephy_pll_disable(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ u32 val;
+
+ val = readl(pll->base + ETH_PLL_CTL0);
+ val &= ~PLL_CTL0_EN;
+ val |= PLL_CTL0_RST;
+ writel(val, pll->base + ETH_PLL_CTL0);
+}
+
+static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+ unsigned int val;
+
+ val = readl(pll->base + ETH_PLL_CTL0);
+
+ return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
+}
+
+static void g12a_ephy_pll_init(struct clk_hw *hw)
+{
+ struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
+
+ /* Apply PLL HW settings */
+ writel(0x29c0040a, pll->base + ETH_PLL_CTL0);
+ writel(0x927e0000, pll->base + ETH_PLL_CTL1);
+ writel(0xac5f49e5, pll->base + ETH_PLL_CTL2);
+ writel(0x00000000, pll->base + ETH_PLL_CTL3);
+ writel(0x00000000, pll->base + ETH_PLL_CTL4);
+ writel(0x20200000, pll->base + ETH_PLL_CTL5);
+ writel(0x0000c002, pll->base + ETH_PLL_CTL6);
+ writel(0x00000023, pll->base + ETH_PLL_CTL7);
+}
+
+static const struct clk_ops g12a_ephy_pll_ops = {
+ .recalc_rate = g12a_ephy_pll_recalc_rate,
+ .is_enabled = g12a_ephy_pll_is_enabled,
+ .enable = g12a_ephy_pll_enable,
+ .disable = g12a_ephy_pll_disable,
+ .init = g12a_ephy_pll_init,
+};
+
+static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
+{
+ int ret;
+
+ /* Enable the phy clock */
+ if (!priv->pll_is_enabled) {
+ ret = clk_prepare_enable(priv->pll);
+ if (ret)
+ return ret;
+ }
+
+ priv->pll_is_enabled = true;
+
+ /* Initialize ephy control */
+ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
+ writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
+ FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
+ FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
+ PHY_CNTL1_CLK_EN |
+ PHY_CNTL1_CLKFREQ |
+ PHY_CNTL1_PHY_ENB,
+ priv->regs + ETH_PHY_CNTL1);
+ writel(PHY_CNTL2_USE_INTERNAL |
+ PHY_CNTL2_SMI_SRC_MAC |
+ PHY_CNTL2_RX_CLK_EPHY,
+ priv->regs + ETH_PHY_CNTL2);
+
+ return 0;
+}
+
+static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
+{
+ /* Reset the mdio bus mux */
+ writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);
+
+ /* Disable the phy clock if enabled */
+ if (priv->pll_is_enabled) {
+ clk_disable_unprepare(priv->pll);
+ priv->pll_is_enabled = false;
+ }
+
+ return 0;
+}
+
+static int g12a_mdio_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct g12a_mdio_mux *priv = dev_get_drvdata(data);
+
+ if (current_child == desired_child)
+ return 0;
+
+ switch (desired_child) {
+ case MESON_G12A_MDIO_EXTERNAL_ID:
+ return g12a_enable_external_mdio(priv);
+ case MESON_G12A_MDIO_INTERNAL_ID:
+ return g12a_enable_internal_mdio(priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct of_device_id g12a_mdio_mux_match[] = {
+ { .compatible = "amlogic,g12a-mdio-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, g12a_mdio_mux_match);
+
+static int g12a_ephy_glue_clk_register(struct device *dev)
+{
+ struct g12a_mdio_mux *priv = dev_get_drvdata(dev);
+ const char *parent_names[PLL_MUX_NUM_PARENT];
+ struct clk_init_data init;
+ struct g12a_ephy_pll *pll;
+ struct clk_mux *mux;
+ struct clk *clk;
+ char *name;
+ int i;
+
+ /* get the mux parents */
+ for (i = 0; i < PLL_MUX_NUM_PARENT; i++) {
+ char in_name[8];
+
+ snprintf(in_name, sizeof(in_name), "clkin%d", i);
+ clk = devm_clk_get(dev, in_name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "Missing clock %s\n", in_name);
+ return PTR_ERR(clk);
+ }
+
+ parent_names[i] = __clk_get_name(clk);
+ }
+
+ /* create the input mux */
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ name = kasprintf(GFP_KERNEL, "%s#mux", dev_name(dev));
+ if (!name)
+ return -ENOMEM;
+
+ init.name = name;
+ init.ops = &clk_mux_ro_ops;
+ init.flags = 0;
+ init.parent_names = parent_names;
+ init.num_parents = PLL_MUX_NUM_PARENT;
+
+ mux->reg = priv->regs + ETH_PLL_CTL0;
+ mux->shift = __ffs(PLL_CTL0_SEL);
+ mux->mask = PLL_CTL0_SEL >> mux->shift;
+ mux->hw.init = &init;
+
+ clk = devm_clk_register(dev, &mux->hw);
+ kfree(name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register input mux\n");
+ return PTR_ERR(clk);
+ }
+
+ /* create the pll */
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ name = kasprintf(GFP_KERNEL, "%s#pll", dev_name(dev));
+ if (!name)
+ return -ENOMEM;
+
+ init.name = name;
+ init.ops = &g12a_ephy_pll_ops;
+ init.flags = 0;
+ parent_names[0] = __clk_get_name(clk);
+ init.parent_names = parent_names;
+ init.num_parents = 1;
+
+ pll->base = priv->regs;
+ pll->hw.init = &init;
+
+ clk = devm_clk_register(dev, &pll->hw);
+ kfree(name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to register ephy pll\n");
+ return PTR_ERR(clk);
+ }
+
+ priv->pll = clk;
+
+ return 0;
+}
+
+static int g12a_mdio_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct g12a_mdio_mux *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get peripheral clock\n");
+ return ret;
+ }
+
+ /* Make sure the device registers are clocked */
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dev, "failed to enable peripheral clock\n");
+ return ret;
+ }
+
+ /* Register PLL in CCF */
+ ret = g12a_ephy_glue_clk_register(dev);
+ if (ret)
+ goto err;
+
+ ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
+ &priv->mux_handle, dev, NULL);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "mdio multiplexer init failed: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ clk_disable_unprepare(priv->pclk);
+ return ret;
+}
+
+static int g12a_mdio_mux_remove(struct platform_device *pdev)
+{
+ struct g12a_mdio_mux *priv = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(priv->mux_handle);
+
+ if (priv->pll_is_enabled)
+ clk_disable_unprepare(priv->pll);
+
+ clk_disable_unprepare(priv->pclk);
+
+ return 0;
+}
+
+static struct platform_driver g12a_mdio_mux_driver = {
+ .probe = g12a_mdio_mux_probe,
+ .remove = g12a_mdio_mux_remove,
+ .driver = {
+ .name = "g12a-mdio_mux",
+ .of_match_table = g12a_mdio_mux_match,
+ },
+};
+module_platform_driver(g12a_mdio_mux_driver);
+
+MODULE_DESCRIPTION("Amlogic G12a MDIO multiplexer driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 4be4cc09eb90..bd04fe762056 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -24,6 +24,7 @@
#include <linux/of_gpio.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/reset.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
@@ -55,10 +56,25 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
return PTR_ERR(gpiod);
}
- mdiodev->reset = gpiod;
+ mdiodev->reset_gpio = gpiod;
- /* Assert the reset signal again */
- mdio_device_reset(mdiodev, 1);
+ return 0;
+}
+
+static int mdiobus_register_reset(struct mdio_device *mdiodev)
+{
+ struct reset_control *reset = NULL;
+
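+ /* The reset controller ("phy" reset line) is optional; a missing
+ * or unsupported one is not treated as an error.
+ */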
+ if (mdiodev->dev.of_node)
+ reset = devm_reset_control_get_exclusive(&mdiodev->dev,
+ "phy");
+ if (PTR_ERR(reset) == -ENOENT ||
+ PTR_ERR(reset) == -ENOTSUPP)
+ reset = NULL;
+ else if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ mdiodev->reset_ctrl = reset;
return 0;
}
@@ -74,6 +90,13 @@ int mdiobus_register_device(struct mdio_device *mdiodev)
err = mdiobus_register_gpiod(mdiodev);
if (err)
return err;
+
+ err = mdiobus_register_reset(mdiodev);
+ if (err)
+ return err;
+
+ /* Assert the reset signal */
+ mdio_device_reset(mdiodev, 1);
}
mdiodev->bus->mdio_map[mdiodev->addr] = mdiodev;
@@ -446,8 +469,8 @@ void mdiobus_unregister(struct mii_bus *bus)
if (!mdiodev)
continue;
- if (mdiodev->reset)
- gpiod_put(mdiodev->reset);
+ if (mdiodev->reset_gpio)
+ gpiod_put(mdiodev->reset_gpio);
mdiodev->device_remove(mdiodev);
mdiodev->device_free(mdiodev);
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 887076292e50..e282600bd83e 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -16,6 +16,7 @@
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/unistd.h>
@@ -116,10 +117,18 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
{
unsigned int d;
- if (!mdiodev->reset)
+ if (!mdiodev->reset_gpio && !mdiodev->reset_ctrl)
return;
- gpiod_set_value(mdiodev->reset, value);
+ if (mdiodev->reset_gpio)
+ gpiod_set_value(mdiodev->reset_gpio, value);
+
+ if (mdiodev->reset_ctrl) {
+ if (value)
+ reset_control_assert(mdiodev->reset_ctrl);
+ else
+ reset_control_deassert(mdiodev->reset_ctrl);
+ }
d = value ? mdiodev->reset_assert_delay : mdiodev->reset_deassert_delay;
if (d)
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 0eec2913c289..fa80d6dce8ee 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -224,24 +224,33 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
static struct phy_driver meson_gxl_phy[] = {
{
- .phy_id = 0x01814400,
- .phy_id_mask = 0xfffffff0,
+ PHY_ID_MATCH_EXACT(0x01814400),
.name = "Meson GXL Internal PHY",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_IS_INTERNAL,
.soft_reset = genphy_soft_reset,
.config_init = meson_gxl_config_init,
- .aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
.ack_interrupt = meson_gxl_ack_interrupt,
.config_intr = meson_gxl_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x01803301),
+ .name = "Meson G12A Internal PHY",
+ /* PHY_BASIC_FEATURES */
+ .flags = PHY_IS_INTERNAL,
+ .soft_reset = genphy_soft_reset,
+ .ack_interrupt = meson_gxl_ack_interrupt,
+ .config_intr = meson_gxl_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = {
- { 0x01814400, 0xfffffff0 },
+ { PHY_ID_MATCH_VENDOR(0x01814400) },
+ { PHY_ID_MATCH_VENDOR(0x01803301) },
{ }
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 352da24f1f33..3c8186f269f9 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,7 @@
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
+#define KSZPHY_OMSO_FACTORY_TEST BIT(15)
#define KSZPHY_OMSO_B_CAST_OFF BIT(9)
#define KSZPHY_OMSO_NAND_TREE_ON BIT(5)
#define KSZPHY_OMSO_RMII_OVERRIDE BIT(1)
@@ -340,6 +341,18 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz8081_config_init(struct phy_device *phydev)
+{
+ /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
+ * based on the RXER (KSZ8081RNA/RND) or TXC (KSZ8081MNX/RNB) pin. If a
+ * pull-down is missing, the factory test mode should be cleared by
+ * manually writing a 0.
+ */
+ phy_clear_bits(phydev, MII_KSZPHY_OMSO, KSZPHY_OMSO_FACTORY_TEST);
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
@@ -738,6 +751,31 @@ static int ksz8873mll_read_status(struct phy_device *phydev)
return 0;
}
+static int ksz9031_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_abilities(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Silicon Errata Sheet (DS80000691D or DS80000692D):
+ * Whenever the device's Asymmetric Pause capability is set to 1,
+ * link-up may fail after a link-up to link-down transition.
+ *
+ * Workaround:
+ * Do not enable the Asymmetric Pause capability bit.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+
+ /* We force setting the Pause capability as the core will force the
+ * Asymmetric Pause capability to 1 otherwise.
+ */
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+
+ return 0;
+}
+
static int ksz9031_read_status(struct phy_device *phydev)
{
int err;
@@ -908,7 +946,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KS8737,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
@@ -919,7 +957,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -934,7 +972,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8031,
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -949,7 +987,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -965,7 +1003,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -980,7 +1018,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8051,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -995,7 +1033,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1010,10 +1048,10 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
- .config_init = kszphy_config_init,
+ .config_init = ksz8081_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.get_sset_count = kszphy_get_sset_count,
@@ -1025,7 +1063,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = ksz8061_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1035,7 +1073,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -1052,9 +1090,9 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9031,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
- .features = PHY_GBIT_FEATURES,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
+ .get_features = ksz9031_get_features,
.config_init = ksz9031_config_init,
.soft_reset = genphy_soft_reset,
.read_status = ksz9031_read_status,
@@ -1069,7 +1107,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -1085,7 +1123,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8873MLL,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
@@ -1095,7 +1133,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ886X,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1103,7 +1141,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8795,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
@@ -1113,7 +1151,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ9477,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9477",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index c6cbb3aa8ae0..eb1b3287fe08 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -333,7 +333,7 @@ static struct phy_driver microchip_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Microchip LAN88xx",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index db50efb30df5..28676af97b42 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -85,12 +85,49 @@ enum rgmii_rx_clock_delay {
#define LED_MODE_SEL_MASK(x) (GENMASK(3, 0) << LED_MODE_SEL_POS(x))
#define LED_MODE_SEL(x, mode) (((mode) << LED_MODE_SEL_POS(x)) & LED_MODE_SEL_MASK(x))
+#define MSCC_EXT_PAGE_CSR_CNTL_17 17
+#define MSCC_EXT_PAGE_CSR_CNTL_18 18
+
+#define MSCC_EXT_PAGE_CSR_CNTL_19 19
+#define MSCC_PHY_CSR_CNTL_19_REG_ADDR(x) (x)
+#define MSCC_PHY_CSR_CNTL_19_TARGET(x) ((x) << 12)
+#define MSCC_PHY_CSR_CNTL_19_READ BIT(14)
+#define MSCC_PHY_CSR_CNTL_19_CMD BIT(15)
+
+#define MSCC_EXT_PAGE_CSR_CNTL_20 20
+#define MSCC_PHY_CSR_CNTL_20_TARGET(x) (x)
+
+#define PHY_MCB_TARGET 0x07
+#define PHY_MCB_S6G_WRITE BIT(31)
+#define PHY_MCB_S6G_READ BIT(30)
+
+#define PHY_S6G_PLL5G_CFG0 0x06
+#define PHY_S6G_LCPLL_CFG 0x11
+#define PHY_S6G_PLL_CFG 0x2b
+#define PHY_S6G_COMMON_CFG 0x2c
+#define PHY_S6G_GPC_CFG 0x2e
+#define PHY_S6G_MISC_CFG 0x3b
+#define PHY_MCB_S6G_CFG 0x3f
+#define PHY_S6G_DFT_CFG2 0x3e
+#define PHY_S6G_PLL_STATUS 0x31
+#define PHY_S6G_IB_STATUS0 0x2f
+
+#define PHY_S6G_SYS_RST_POS 31
+#define PHY_S6G_ENA_LANE_POS 18
+#define PHY_S6G_ENA_LOOP_POS 8
+#define PHY_S6G_QRATE_POS 6
+#define PHY_S6G_IF_MODE_POS 4
+#define PHY_S6G_PLL_ENA_OFFS_POS 21
+#define PHY_S6G_PLL_FSM_CTRL_DATA_POS 8
+#define PHY_S6G_PLL_FSM_ENA_POS 7
+
#define MSCC_EXT_PAGE_ACCESS 31
#define MSCC_PHY_PAGE_STANDARD 0x0000 /* Standard registers */
#define MSCC_PHY_PAGE_EXTENDED 0x0001 /* Extended registers */
#define MSCC_PHY_PAGE_EXTENDED_2 0x0002 /* Extended reg - page 2 */
#define MSCC_PHY_PAGE_EXTENDED_3 0x0003 /* Extended reg - page 3 */
#define MSCC_PHY_PAGE_EXTENDED_4 0x0004 /* Extended reg - page 4 */
+#define MSCC_PHY_PAGE_CSR_CNTL MSCC_PHY_PAGE_EXTENDED_4
/* Extended reg - GPIO; this is a bank of registers that are shared for all PHYs
* in the same package.
*/
@@ -216,6 +253,7 @@ enum rgmii_rx_clock_delay {
#define MSCC_PHY_TR_MSB 18
/* Microsemi PHY ID's */
+#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8530 0x00070560
#define PHY_ID_VSC8531 0x00070570
#define PHY_ID_VSC8540 0x00070760
@@ -1742,6 +1780,386 @@ static int vsc8584_did_interrupt(struct phy_device *phydev)
return (rc < 0) ? 0 : rc & MII_VSC85XX_INT_MASK_MASK;
}
+static int vsc8514_config_pre_init(struct phy_device *phydev)
+{
+ /* These settings override the silicon default values to tune the
+ * hardware performance of the PHY. They are applied at power-on
+ * and remain in effect until the PHY is reset.
+ */
+ const struct reg_val pre_init1[] = {
+ {0x0f90, 0x00688980},
+ {0x0786, 0x00000003},
+ {0x07fa, 0x0050100f},
+ {0x0f82, 0x0012b002},
+ {0x1686, 0x00000004},
+ {0x168c, 0x00d2c46f},
+ {0x17a2, 0x00000620},
+ {0x16a0, 0x00eeffdd},
+ {0x16a6, 0x00071448},
+ {0x16a4, 0x0013132f},
+ {0x16a8, 0x00000000},
+ {0x0ffc, 0x00c0a028},
+ {0x0fe8, 0x0091b06c},
+ {0x0fea, 0x00041600},
+ {0x0f80, 0x00fffaff},
+ {0x0fec, 0x00901809},
+ {0x0ffe, 0x00b01007},
+ {0x16b0, 0x00eeff00},
+ {0x16b2, 0x00007000},
+ {0x16b4, 0x00000814},
+ };
+ unsigned int i;
+ u16 reg;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ /* all writes below are broadcasted to all PHYs in the same package */
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg |= SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg |= BIT(15);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TR);
+
+ for (i = 0; i < ARRAY_SIZE(pre_init1); i++)
+ vsc8584_csr_write(phydev, pre_init1[i].reg, pre_init1[i].val);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_TEST);
+
+ reg = phy_base_read(phydev, MSCC_PHY_TEST_PAGE_8);
+ reg &= ~BIT(15);
+ phy_base_write(phydev, MSCC_PHY_TEST_PAGE_8, reg);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ reg = phy_base_read(phydev, MSCC_PHY_EXT_CNTL_STATUS);
+ reg &= ~SMI_BROADCAST_WR_EN;
+ phy_base_write(phydev, MSCC_PHY_EXT_CNTL_STATUS, reg);
+
+ return 0;
+}
+
+static u32 vsc85xx_csr_ctrl_phy_read(struct phy_device *phydev,
+ u32 target, u32 reg)
+{
+ unsigned long deadline;
+ u32 val, val_l, val_h;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+
+ /* CSR registers are grouped under different Target IDs.
+ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
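+ * For example, PHY_MCB_TARGET (0x07) is programmed as 0x1 (target >> 2)
+ * in MSCC_EXT_PAGE_CSR_CNTL_20 and 0x3 (target & 0x3) in
+ * MSCC_EXT_PAGE_CSR_CNTL_19.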
+ */
+
+ /* Setup the Target ID */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+
+ /* Trigger CSR Action - Read into the CSRs */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+ MSCC_PHY_CSR_CNTL_19_CMD | MSCC_PHY_CSR_CNTL_19_READ |
+ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+ MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+
+ /* Wait for register access */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+ } while (time_before(jiffies, deadline) &&
+ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+
+ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+ return 0xffffffff;
+
+ /* Read the Least Significant Word (LSW) (17) */
+ val_l = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_17);
+
+ /* Read the Most Significant Word (MSW) (18) */
+ val_h = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_18);
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ return (val_h << 16) | val_l;
+}
+
+static int vsc85xx_csr_ctrl_phy_write(struct phy_device *phydev,
+ u32 target, u32 reg, u32 val)
+{
+ unsigned long deadline;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_CSR_CNTL);
+
+ /* CSR registers are grouped under different Target IDs.
+ * 6-bit Target_ID is split between MSCC_EXT_PAGE_CSR_CNTL_20 and
+ * MSCC_EXT_PAGE_CSR_CNTL_19 registers.
+ * Target_ID[5:2] maps to bits[3:0] of MSCC_EXT_PAGE_CSR_CNTL_20
+ * and Target_ID[1:0] maps to bits[13:12] of MSCC_EXT_PAGE_CSR_CNTL_19.
+ */
+
+ /* Setup the Target ID */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_20,
+ MSCC_PHY_CSR_CNTL_20_TARGET(target >> 2));
+
+ /* Write the Least Significant Word (LSW) (17) */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_17, (u16)val);
+
+ /* Write the Most Significant Word (MSW) (18) */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_18, (u16)(val >> 16));
+
+ /* Trigger CSR Action - Write into the CSRs */
+ phy_base_write(phydev, MSCC_EXT_PAGE_CSR_CNTL_19,
+ MSCC_PHY_CSR_CNTL_19_CMD |
+ MSCC_PHY_CSR_CNTL_19_REG_ADDR(reg) |
+ MSCC_PHY_CSR_CNTL_19_TARGET(target & 0x3));
+
+ /* Wait for register access */
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = phy_base_read(phydev, MSCC_EXT_PAGE_CSR_CNTL_19);
+ } while (time_before(jiffies, deadline) &&
+ !(val & MSCC_PHY_CSR_CNTL_19_CMD));
+
+ if (!(val & MSCC_PHY_CSR_CNTL_19_CMD))
+ return -ETIMEDOUT;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_STANDARD);
+
+ return 0;
+}
+
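+/* Trigger the requested operation (read or write) on the given MCB and
+ * poll until the hardware clears the operation bit.
+ */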
+static int __phy_write_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb,
+ u32 op)
+{
+ unsigned long deadline;
+ u32 val;
+ int ret;
+
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET, reg,
+ op | (1 << mcb));
+ if (ret)
+ return -EINVAL;
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ val = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET, reg);
+
+ if (val == 0xffffffff)
+ return -EIO;
+
+ } while (time_before(jiffies, deadline) && (val & op));
+
+ if (val & op)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/* Trigger a read to the specified MCB */
+static int phy_update_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+{
+ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_READ);
+}
+
+/* Trigger a write to the specified MCB */
+static int phy_commit_mcb_s6g(struct phy_device *phydev, u32 reg, u8 mcb)
+{
+ return __phy_write_mcb_s6g(phydev, reg, mcb, PHY_MCB_S6G_WRITE);
+}
+
+static int vsc8514_config_init(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531 = phydev->priv;
+ unsigned long deadline;
+ u16 val, addr;
+ int ret, i;
+ u32 reg;
+
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+
+ __phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_EXTENDED);
+
+ addr = __phy_read(phydev, MSCC_PHY_EXT_PHY_CNTL_4);
+ addr >>= PHY_CNTL_4_ADDR_POS;
+
+ val = __phy_read(phydev, MSCC_PHY_ACTIPHY_CNTL);
+
+ if (val & PHY_ADDR_REVERSED)
+ vsc8531->base_addr = phydev->mdio.addr + addr;
+ else
+ vsc8531->base_addr = phydev->mdio.addr - addr;
+
+ /* Some parts of the init sequence are identical for every PHY in the
+ * package. Some parts modify the GPIO register bank, which is a set of
+ * registers affecting all PHYs, and a few reset the microprocessor
+ * common to all PHYs.
+ * All PHYs' interrupt mask registers have to be zeroed before enabling
+ * any PHY's interrupt in this register.
+ * For all these reasons, the init sequence must be run once and only
+ * once, by whichever PHY in the package happens to be initialized
+ * first, and the package-critical parts of it are done in this
+ * pre-init function.
+ */
+ if (!vsc8584_is_pkg_init(phydev, val & PHY_ADDR_REVERSED ? 1 : 0))
+ vsc8514_config_pre_init(phydev);
+
+ vsc8531->pkg_init = true;
+
+ phy_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
+ MSCC_PHY_PAGE_EXTENDED_GPIO);
+
+ val = phy_base_read(phydev, MSCC_PHY_MAC_CFG_FASTLINK);
+
+ val &= ~MAC_CFG_MASK;
+ val |= MAC_CFG_QSGMII;
+ ret = phy_base_write(phydev, MSCC_PHY_MAC_CFG_FASTLINK, val);
+
+ if (ret)
+ goto err;
+
+ ret = vsc8584_cmd(phydev,
+ PROC_CMD_MCB_ACCESS_MAC_CONF |
+ PROC_CMD_RST_CONF_PORT |
+ PROC_CMD_READ_MOD_WRITE_PORT | PROC_CMD_QSGMII_MAC);
+ if (ret)
+ goto err;
+
+ /* 6g mcb */
+ phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+ /* lcpll mcb */
+ phy_update_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+ /* pll5gcfg0 */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_PLL5G_CFG0, 0x7036f145);
+ if (ret)
+ goto err;
+
+ phy_commit_mcb_s6g(phydev, PHY_S6G_LCPLL_CFG, 0);
+ /* pllcfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_PLL_CFG,
+ (3 << PHY_S6G_PLL_ENA_OFFS_POS) |
+ (120 << PHY_S6G_PLL_FSM_CTRL_DATA_POS)
+ | (0 << PHY_S6G_PLL_FSM_ENA_POS));
+ if (ret)
+ goto err;
+
+ /* commoncfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_COMMON_CFG,
+ (0 << PHY_S6G_SYS_RST_POS) |
+ (0 << PHY_S6G_ENA_LANE_POS) |
+ (0 << PHY_S6G_ENA_LOOP_POS) |
+ (0 << PHY_S6G_QRATE_POS) |
+ (3 << PHY_S6G_IF_MODE_POS));
+ if (ret)
+ goto err;
+
+ /* misccfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_MISC_CFG, 1);
+ if (ret)
+ goto err;
+
+ /* gpcfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_GPC_CFG, 768);
+ if (ret)
+ goto err;
+
+ phy_commit_mcb_s6g(phydev, PHY_S6G_DFT_CFG2, 0);
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+ 0); /* read 6G MCB into CSRs */
+ reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+ PHY_S6G_PLL_STATUS);
+ if (reg == 0xffffffff) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -EIO;
+ }
+
+ } while (time_before(jiffies, deadline) && (reg & BIT(12)));
+
+ if (reg & BIT(12)) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -ETIMEDOUT;
+ }
+
+ /* misccfg */
+ ret = vsc85xx_csr_ctrl_phy_write(phydev, PHY_MCB_TARGET,
+ PHY_S6G_MISC_CFG, 0);
+ if (ret)
+ goto err;
+
+ phy_commit_mcb_s6g(phydev, PHY_MCB_S6G_CFG, 0);
+
+ deadline = jiffies + msecs_to_jiffies(PROC_CMD_NCOMPLETED_TIMEOUT_MS);
+ do {
+ usleep_range(500, 1000);
+ phy_update_mcb_s6g(phydev, PHY_MCB_S6G_CFG,
+ 0); /* read 6G MCB into CSRs */
+ reg = vsc85xx_csr_ctrl_phy_read(phydev, PHY_MCB_TARGET,
+ PHY_S6G_IB_STATUS0);
+ if (reg == 0xffffffff) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -EIO;
+ }
+
+ } while (time_before(jiffies, deadline) && !(reg & BIT(8)));
+
+ if (!(reg & BIT(8))) {
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return -ETIMEDOUT;
+ }
+
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+
+ ret = phy_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
+
+ if (ret)
+ return ret;
+
+ ret = phy_modify(phydev, MSCC_PHY_EXT_PHY_CNTL_1, MEDIA_OP_MODE_MASK,
+ MEDIA_OP_MODE_COPPER);
+
+ if (ret)
+ return ret;
+
+ ret = genphy_soft_reset(phydev);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vsc8531->nleds; i++) {
+ ret = vsc85xx_led_cntl_set(phydev, i, vsc8531->leds_mode[i]);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+
+err:
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+ return ret;
+}
+
static int vsc85xx_ack_interrupt(struct phy_device *phydev)
{
int rc = 0;
@@ -1791,6 +2209,31 @@ static int vsc85xx_read_status(struct phy_device *phydev)
return genphy_read_status(phydev);
}
+static int vsc8514_probe(struct phy_device *phydev)
+{
+ struct vsc8531_private *vsc8531;
+ u32 default_mode[4] = {VSC8531_LINK_1000_ACTIVITY,
+ VSC8531_LINK_100_ACTIVITY, VSC8531_LINK_ACTIVITY,
+ VSC8531_DUPLEX_COLLISION};
+
+ vsc8531 = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531), GFP_KERNEL);
+ if (!vsc8531)
+ return -ENOMEM;
+
+ phydev->priv = vsc8531;
+
+ vsc8531->nleds = 4;
+ vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
+ vsc8531->hw_stats = vsc85xx_hw_stats;
+ vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
+ vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
+ if (!vsc8531->stats)
+ return -ENOMEM;
+
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+}
+
static int vsc8574_probe(struct phy_device *phydev)
{
struct vsc8531_private *vsc8531;
@@ -1879,10 +2322,33 @@ static int vsc85xx_probe(struct phy_device *phydev)
/* Microsemi VSC85xx PHYs */
static struct phy_driver vsc85xx_driver[] = {
{
+ .phy_id = PHY_ID_VSC8514,
+ .name = "Microsemi GE VSC8514 SyncE",
+ .phy_id_mask = 0xfffffff0,
+ .soft_reset = &genphy_soft_reset,
+ .config_init = &vsc8514_config_init,
+ .config_aneg = &vsc85xx_config_aneg,
+ .read_status = &vsc85xx_read_status,
+ .ack_interrupt = &vsc85xx_ack_interrupt,
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
+ .probe = &vsc8514_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+ .read_page = &vsc85xx_phy_read_page,
+ .write_page = &vsc85xx_phy_write_page,
+ .get_sset_count = &vsc85xx_get_sset_count,
+ .get_strings = &vsc85xx_get_strings,
+ .get_stats = &vsc85xx_get_stats,
+},
+{
.phy_id = PHY_ID_VSC8530,
.name = "Microsemi FE VSC8530",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1907,7 +2373,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8531,
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1932,7 +2398,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8540,
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1957,7 +2423,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8541,
.name = "Microsemi VSC8541 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1982,7 +2448,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8574,
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -2008,7 +2474,7 @@ static struct phy_driver vsc85xx_driver[] = {
.phy_id = PHY_ID_VSC8584,
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -2034,6 +2500,7 @@ static struct phy_driver vsc85xx_driver[] = {
module_phy_driver(vsc85xx_driver);
static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+ { PHY_ID_VSC8514, 0xfffffff0, },
{ PHY_ID_VSC8530, 0xfffffff0, },
{ PHY_ID_VSC8531, 0xfffffff0, },
{ PHY_ID_VSC8540, 0xfffffff0, },
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 42282a86b680..a221dd552c3c 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -128,7 +128,7 @@ static struct phy_driver dp83865_driver[] = { {
.phy_id = DP83865_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = ns_config_init,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 9e24d9569424..abe13dfe50ad 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -262,12 +262,30 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
{
int val;
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (!(val & MDIO_AN_STAT1_COMPLETE)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->lp_advertising);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0);
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+ }
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising,
+ val & MDIO_AN_STAT1_LPABLE);
+
/* Read the link partner's base page advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
if (val < 0)
return val;
- mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
@@ -498,21 +516,10 @@ int gen10g_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-static int gen10g_read_status(struct phy_device *phydev)
-{
- /* For now just lie and say it's 10G all the time */
- phydev->speed = SPEED_10000;
- phydev->duplex = DUPLEX_FULL;
-
- return genphy_c45_read_link(phydev);
-}
-
-struct phy_driver genphy_10g_driver = {
+struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
- .name = "Generic 10G PHY",
+ .name = "Generic Clause 45 PHY",
.soft_reset = genphy_no_soft_reset,
- .features = PHY_10GBIT_FEATURES,
- .config_aneg = gen10g_config_aneg,
- .read_status = gen10g_read_status,
+ .read_status = genphy_c45_read_status,
};
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 5016cd5fd7c7..3daf0214a242 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -8,6 +8,11 @@
const char *phy_speed_to_str(int speed)
{
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 67,
+ "Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
+ "If a speed or mode has been added please update phy_speed_to_str "
+ "and the PHY settings array.\n");
+
switch (speed) {
case SPEED_10:
return "10Mbps";
@@ -35,6 +40,8 @@ const char *phy_speed_to_str(int speed)
return "56Gbps";
case SPEED_100000:
return "100Gbps";
+ case SPEED_200000:
+ return "200Gbps";
case SPEED_UNKNOWN:
return "Unknown";
default:
@@ -58,222 +65,81 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed. */
+
+#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
+ .bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
+
static const struct phy_setting settings[] = {
+ /* 200G */
+ PHY_SETTING( 200000, FULL, 200000baseCR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseKR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseLR4_ER4_FR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseDR4_Full ),
+ PHY_SETTING( 200000, FULL, 200000baseSR4_Full ),
/* 100G */
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- },
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- },
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- },
- {
- .speed = SPEED_100000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- },
+ PHY_SETTING( 100000, FULL, 100000baseCR4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseKR4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseLR4_ER4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseSR4_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseCR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseKR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseLR2_ER2_FR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseDR2_Full ),
+ PHY_SETTING( 100000, FULL, 100000baseSR2_Full ),
/* 56G */
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
- },
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
- },
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
- },
- {
- .speed = SPEED_56000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
- },
+ PHY_SETTING( 56000, FULL, 56000baseCR4_Full ),
+ PHY_SETTING( 56000, FULL, 56000baseKR4_Full ),
+ PHY_SETTING( 56000, FULL, 56000baseLR4_Full ),
+ PHY_SETTING( 56000, FULL, 56000baseSR4_Full ),
/* 50G */
- {
- .speed = SPEED_50000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- },
- {
- .speed = SPEED_50000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- },
- {
- .speed = SPEED_50000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- },
+ PHY_SETTING( 50000, FULL, 50000baseCR2_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseKR2_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseSR2_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseCR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseKR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseLR_ER_FR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseDR_Full ),
+ PHY_SETTING( 50000, FULL, 50000baseSR_Full ),
/* 40G */
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- },
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- },
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- },
- {
- .speed = SPEED_40000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- },
+ PHY_SETTING( 40000, FULL, 40000baseCR4_Full ),
+ PHY_SETTING( 40000, FULL, 40000baseKR4_Full ),
+ PHY_SETTING( 40000, FULL, 40000baseLR4_Full ),
+ PHY_SETTING( 40000, FULL, 40000baseSR4_Full ),
/* 25G */
- {
- .speed = SPEED_25000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- },
- {
- .speed = SPEED_25000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- },
- {
- .speed = SPEED_25000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- },
-
+ PHY_SETTING( 25000, FULL, 25000baseCR_Full ),
+ PHY_SETTING( 25000, FULL, 25000baseKR_Full ),
+ PHY_SETTING( 25000, FULL, 25000baseSR_Full ),
/* 20G */
- {
- .speed = SPEED_20000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
- },
- {
- .speed = SPEED_20000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
- },
+ PHY_SETTING( 20000, FULL, 20000baseKR2_Full ),
+ PHY_SETTING( 20000, FULL, 20000baseMLD2_Full ),
/* 10G */
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- },
- {
- .speed = SPEED_10000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- },
+ PHY_SETTING( 10000, FULL, 10000baseCR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseER_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseKR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseKX4_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseLR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseLRM_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseR_FEC ),
+ PHY_SETTING( 10000, FULL, 10000baseSR_Full ),
+ PHY_SETTING( 10000, FULL, 10000baseT_Full ),
/* 5G */
- {
- .speed = SPEED_5000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
- },
-
+ PHY_SETTING( 5000, FULL, 5000baseT_Full ),
/* 2.5G */
- {
- .speed = SPEED_2500,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
- },
- {
- .speed = SPEED_2500,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
- },
+ PHY_SETTING( 2500, FULL, 2500baseT_Full ),
+ PHY_SETTING( 2500, FULL, 2500baseX_Full ),
/* 1G */
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- },
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- },
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_HALF,
- .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- },
- {
- .speed = SPEED_1000,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
- },
+ PHY_SETTING( 1000, FULL, 1000baseKX_Full ),
+ PHY_SETTING( 1000, FULL, 1000baseT_Full ),
+ PHY_SETTING( 1000, HALF, 1000baseT_Half ),
+ PHY_SETTING( 1000, FULL, 1000baseX_Full ),
/* 100M */
- {
- .speed = SPEED_100,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- },
- {
- .speed = SPEED_100,
- .duplex = DUPLEX_HALF,
- .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
- },
+ PHY_SETTING( 100, FULL, 100baseT_Full ),
+ PHY_SETTING( 100, HALF, 100baseT_Half ),
/* 10M */
- {
- .speed = SPEED_10,
- .duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
- },
- {
- .speed = SPEED_10,
- .duplex = DUPLEX_HALF,
- .bit = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
- },
+ PHY_SETTING( 10, FULL, 10baseT_Full ),
+ PHY_SETTING( 10, HALF, 10baseT_Half ),
};
+#undef PHY_SETTING
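
The PHY_SETTING() macro only stamps out the same three-field initializers the table used before; reconstructing one expansion by hand for illustration:

	/* PHY_SETTING( 1000, FULL, 1000baseT_Full ) expands to: */
	{
		.speed  = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.bit    = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	},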
/**
* phy_lookup_setting - lookup a PHY setting
@@ -362,7 +228,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
if (err)
return err;
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_advertise_supported(phydev);
return 0;
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3745220c5c98..e8885429293a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -43,7 +43,6 @@ static const char *phy_state_to_str(enum phy_state st)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
PHY_STATE_STR(HALTED)
- PHY_STATE_STR(RESUMING)
}
return NULL;
@@ -61,6 +60,32 @@ static void phy_link_down(struct phy_device *phydev, bool do_carrier)
phy_led_trigger_change_speed(phydev);
}
+static const char *phy_pause_str(struct phy_device *phydev)
+{
+ bool local_pause, local_asym_pause;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ goto no_pause;
+
+ local_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ local_asym_pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ if (local_pause && phydev->pause)
+ return "rx/tx";
+
+ if (local_asym_pause && phydev->asym_pause) {
+ if (local_pause)
+ return "rx";
+ if (phydev->pause)
+ return "tx";
+ }
+
+no_pause:
+ return "off";
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -72,7 +97,7 @@ void phy_print_status(struct phy_device *phydev)
"Link is Up - %s/%s - flow control %s\n",
phy_speed_to_str(phydev->speed),
phy_duplex_to_str(phydev->duplex),
- phydev->pause ? "rx/tx" : "off");
+ phy_pause_str(phydev));
} else {
netdev_info(phydev->attached_dev, "Link is Down\n");
}
@@ -214,10 +239,6 @@ static void phy_sanitize_settings(struct phy_device *phydev)
{
const struct phy_setting *setting;
- /* Sanitize settings based on PHY capabilities */
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
- phydev->autoneg = AUTONEG_DISABLE;
-
setting = phy_find_valid(phydev->speed, phydev->duplex,
phydev->supported);
if (setting) {
@@ -863,10 +884,7 @@ void phy_start(struct phy_device *phydev)
goto out;
}
- if (phydev->state == PHY_READY)
- phydev->state = PHY_UP;
- else
- phydev->state = PHY_RESUMING;
+ phydev->state = PHY_UP;
phy_start_machine(phydev);
out:
@@ -891,9 +909,6 @@ void phy_state_machine(struct work_struct *work)
old_state = phydev->state;
- if (phydev->drv && phydev->drv->link_change_notify)
- phydev->drv->link_change_notify(phydev);
-
switch (phydev->state) {
case PHY_DOWN:
case PHY_READY:
@@ -904,7 +919,6 @@ void phy_state_machine(struct work_struct *work)
break;
case PHY_NOLINK:
case PHY_RUNNING:
- case PHY_RESUMING:
err = phy_check_link_status(phydev);
break;
case PHY_FORCING:
@@ -940,10 +954,13 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);
- if (old_state != phydev->state)
+ if (old_state != phydev->state) {
phydev_dbg(phydev, "PHY state change %s -> %s\n",
phy_state_to_str(old_state),
phy_state_to_str(phydev->state));
+ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+ }
/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 77068c545de0..dcc93a873174 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -225,7 +225,7 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev)
}
static struct phy_driver genphy_driver;
-extern struct phy_driver genphy_10g_driver;
+extern struct phy_driver genphy_c45_driver;
static LIST_HEAD(phy_fixup_list);
static DEFINE_MUTEX(phy_fixup_lock);
@@ -1174,7 +1174,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
*/
if (!d->driver) {
if (phydev->is_c45)
- d->driver = &genphy_10g_driver.mdiodrv.driver;
+ d->driver = &genphy_c45_driver.mdiodrv.driver;
else
d->driver = &genphy_driver.mdiodrv.driver;
@@ -1335,7 +1335,7 @@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy);
bool phy_driver_is_genphy_10g(struct phy_device *phydev)
{
return phy_driver_is_genphy_kind(phydev,
- &genphy_10g_driver.mdiodrv.driver);
+ &genphy_c45_driver.mdiodrv.driver);
}
EXPORT_SYMBOL_GPL(phy_driver_is_genphy_10g);
@@ -1710,23 +1710,19 @@ int genphy_update_link(struct phy_device *phydev)
*/
if (!phy_polling_mode(phydev)) {
status = phy_read(phydev, MII_BMSR);
- if (status < 0) {
+ if (status < 0)
return status;
- } else if (status & BMSR_LSTATUS) {
- phydev->link = 1;
- return 0;
- }
+ else if (status & BMSR_LSTATUS)
+ goto done;
}
/* Read link and autonegotiation status */
status = phy_read(phydev, MII_BMSR);
if (status < 0)
return status;
-
- if ((status & BMSR_LSTATUS) == 0)
- phydev->link = 0;
- else
- phydev->link = 1;
+done:
+ phydev->link = status & BMSR_LSTATUS ? 1 : 0;
+ phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
return 0;
}
@@ -1743,23 +1739,26 @@ EXPORT_SYMBOL(genphy_update_link);
*/
int genphy_read_status(struct phy_device *phydev)
{
- int adv;
- int err;
- int lpa;
- int lpagb = 0;
+ int adv, lpa, lpagb, err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
linkmode_zero(phydev->lp_advertising);
- if (AUTONEG_ENABLE == phydev->autoneg) {
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
- phydev->supported) ||
- linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- phydev->supported)) {
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
+ if (phydev->is_gigabit_capable) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
@@ -1785,14 +1784,8 @@ int genphy_read_status(struct phy_device *phydev)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
phy_resolve_aneg_linkmode(phydev);
- } else {
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
int bmcr = phy_read(phydev, MII_BMCR);
if (bmcr < 0)
@@ -1809,9 +1802,6 @@ int genphy_read_status(struct phy_device *phydev)
phydev->speed = SPEED_100;
else
phydev->speed = SPEED_10;
-
- phydev->pause = 0;
- phydev->asym_pause = 0;
}
return 0;
@@ -1829,13 +1819,25 @@ EXPORT_SYMBOL(genphy_read_status);
*/
int genphy_soft_reset(struct phy_device *phydev)
{
+ u16 res = BMCR_RESET;
int ret;
- ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ if (phydev->autoneg == AUTONEG_ENABLE)
+ res |= BMCR_ANRESTART;
+
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ISOLATE, res);
if (ret < 0)
return ret;
- return phy_poll_reset(phydev);
+ ret = phy_poll_reset(phydev);
+ if (ret)
+ return ret;
+
+ /* BMCR may be reset to defaults */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ ret = genphy_setup_forced(phydev);
+
+ return ret;
}
EXPORT_SYMBOL(genphy_soft_reset);
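
genphy_soft_reset() now goes through phy_modify(), which clears the mask bits (BMCR_ISOLATE here) and sets the requested ones in a single read-modify-write. A rough equivalent of that helper, ignoring the MDIO locking and error plumbing the real one in phy-core.c has:

	static int example_phy_modify(struct phy_device *phydev, u32 regnum,
				      u16 mask, u16 set)
	{
		int old = phy_read(phydev, regnum);

		if (old < 0)
			return old;

		/* drop the masked bits, then or in the new ones */
		return phy_write(phydev, regnum, (old & ~mask) | set);
	}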
@@ -1887,6 +1889,54 @@ int genphy_config_init(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_init);
+/**
+ * genphy_read_abilities - read PHY abilities from Clause 22 registers
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the PHY's abilities and populates
+ * phydev->supported accordingly.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_read_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ val = phy_read(phydev, MII_BMSR);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported,
+ val & BMSR_ANEGCAPABLE);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported,
+ val & BMSR_100FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported,
+ val & BMSR_100HALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported,
+ val & BMSR_10FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported,
+ val & BMSR_10HALF);
+
+ if (val & BMSR_ESTATEN) {
+ val = phy_read(phydev, MII_ESTATUS);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported, val & ESTATUS_1000_TFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported, val & ESTATUS_1000_THALF);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_abilities);
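
With genphy_read_abilities() in place, a Clause 22 driver can omit a hard-coded .features mask entirely and let phy_probe() discover the supported modes from BMSR/ESTATUS, which is what the /* PHY_BASIC_FEATURES */ and /* PHY_GBIT_FEATURES */ comment conversions elsewhere in this series rely on. An illustrative driver entry (the ID and name are invented):

	static struct phy_driver example_drivers[] = {
	{
		.phy_id		= 0x01234560,	/* invented ID */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example C22 PHY",
		/* no .features and no .get_features: phy_probe() falls back to
		 * genphy_read_abilities() for Clause 22 devices (or
		 * genphy_c45_pma_read_abilities() when phydev->is_c45 is set)
		 */
		.suspend	= genphy_suspend,
		.resume		= genphy_resume,
	},
	};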
+
/* This is used for the phy device which doesn't support the MMD extended
* register access, but it does have side effect when we are trying to access
* the MMD register via indirect method.
@@ -1935,10 +1985,35 @@ EXPORT_SYMBOL(genphy_loopback);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
{
linkmode_clear_bit(link_mode, phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_advertise_supported(phydev);
}
EXPORT_SYMBOL(phy_remove_link_mode);
+static void phy_copy_pause_bits(unsigned long *dst, unsigned long *src)
+{
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dst,
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, src));
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, dst,
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, src));
+}
+
+/**
+ * phy_advertise_supported - Advertise all supported modes
+ * @phydev: target phy_device struct
+ *
+ * Description: Called to advertise all supported modes, doesn't touch
+ * pause mode advertising.
+ */
+void phy_advertise_supported(struct phy_device *phydev)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(new);
+
+ linkmode_copy(new, phydev->supported);
+ phy_copy_pause_bits(new, phydev->advertising);
+ linkmode_copy(phydev->advertising, new);
+}
+EXPORT_SYMBOL(phy_advertise_supported);
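
phy_advertise_supported() deliberately leaves the Pause and Asym_Pause bits alone, so a MAC's earlier flow-control choice survives later adjustments to the supported mask. A small usage sketch:

	static void example_mac_setup(struct phy_device *phydev)
	{
		/* MAC can generate and honour pause frames */
		phy_support_asym_pause(phydev);

		/* re-deriving advertising from supported (which
		 * phy_remove_link_mode() now does via phy_advertise_supported())
		 * keeps the pause bits chosen above intact
		 */
		phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}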
+
/**
* phy_support_sym_pause - Enable support of symmetrical pause
* @phydev: target phy_device struct
@@ -1949,8 +2024,7 @@ EXPORT_SYMBOL(phy_remove_link_mode);
void phy_support_sym_pause(struct phy_device *phydev)
{
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_copy_pause_bits(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_sym_pause);
@@ -1962,9 +2036,7 @@ EXPORT_SYMBOL(phy_support_sym_pause);
*/
void phy_support_asym_pause(struct phy_device *phydev)
{
- linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_copy_pause_bits(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_asym_pause);
@@ -2044,11 +2116,14 @@ bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp)
{
if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
- phydev->supported) ||
- (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
- phydev->supported) &&
- pp->rx_pause != pp->tx_pause))
+ phydev->supported) && pp->rx_pause)
+ return false;
+
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported) &&
+ pp->rx_pause != pp->tx_pause)
return false;
+
return true;
}
EXPORT_SYMBOL(phy_validate_pause);
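
The relaxed check above accepts rx-only requests as long as Pause is supported and only insists on rx == tx when Asym_Pause is missing; spelling out a few outcomes (rx/tx are the requested ethtool pauseparam values):

	/*
	 * supported bits        requested rx/tx   result
	 * Pause only            rx=1, tx=1        accepted
	 * Pause only            rx=1, tx=0        rejected (needs Asym_Pause)
	 * Pause + Asym_Pause    rx=1, tx=0        accepted
	 * neither               rx=0, tx=0        accepted
	 * neither               rx=1, tx=1        rejected (Pause unsupported)
	 */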
@@ -2104,14 +2179,30 @@ static int phy_probe(struct device *dev)
*/
if (phydrv->features) {
linkmode_copy(phydev->supported, phydrv->features);
- } else {
+ } else if (phydrv->get_features) {
err = phydrv->get_features(phydev);
- if (err)
- goto out;
+ } else if (phydev->is_c45) {
+ err = genphy_c45_pma_read_abilities(phydev);
+ } else {
+ err = genphy_read_abilities(phydev);
}
+ if (err)
+ goto out;
+
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported))
+ phydev->autoneg = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported))
+ phydev->is_gigabit_capable = 1;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
+ phydev->is_gigabit_capable = 1;
+
of_set_phy_supported(phydev);
- linkmode_copy(phydev->advertising, phydev->supported);
+ phy_advertise_supported(phydev);
/* Get the EEE modes we want to prohibit. We will ask
* the PHY stop advertising these mode later on
@@ -2177,11 +2268,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
int retval;
/* Either the features are hard coded, or dynamically
- * determine. It cannot be both or neither
+ * determined. It cannot be both.
*/
- if (WARN_ON((!new_driver->features && !new_driver->get_features) ||
- (new_driver->features && new_driver->get_features))) {
- pr_err("%s: Driver features are missing\n", new_driver->name);
+ if (WARN_ON(new_driver->features && new_driver->get_features)) {
+ pr_err("%s: features and get_features must not both be set\n",
+ new_driver->name);
return -EINVAL;
}
@@ -2243,8 +2334,7 @@ static struct phy_driver genphy_driver = {
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
- .config_init = genphy_config_init,
- .features = PHY_GBIT_ALL_PORTS_FEATURES,
+ .get_features = genphy_read_abilities,
.aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -2261,14 +2351,14 @@ static int __init phy_init(void)
features_init();
- rc = phy_driver_register(&genphy_10g_driver, THIS_MODULE);
+ rc = phy_driver_register(&genphy_c45_driver, THIS_MODULE);
if (rc)
- goto err_10g;
+ goto err_c45;
rc = phy_driver_register(&genphy_driver, THIS_MODULE);
if (rc) {
- phy_driver_unregister(&genphy_10g_driver);
-err_10g:
+ phy_driver_unregister(&genphy_c45_driver);
+err_c45:
mdio_bus_exit();
}
@@ -2277,7 +2367,7 @@ err_10g:
static void __exit phy_exit(void)
{
- phy_driver_unregister(&genphy_10g_driver);
+ phy_driver_unregister(&genphy_c45_driver);
phy_driver_unregister(&genphy_driver);
mdio_bus_exit();
}
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 5486f6fb2ab2..1b15a991ee06 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -110,7 +110,7 @@ static struct phy_driver qs6612_driver[] = { {
.phy_id = 0x00181440,
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = qs6612_config_init,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 10df52ccddfe..d6a10f323117 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -151,21 +151,14 @@ static int rtl8211_config_aneg(struct phy_device *phydev)
static int rtl8211c_config_init(struct phy_device *phydev)
{
/* RTL8211C has an issue when operating in Gigabit slave mode */
- phy_set_bits(phydev, MII_CTRL1000,
- CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
-
- return genphy_config_init(phydev);
+ return phy_set_bits(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
}
static int rtl8211f_config_init(struct phy_device *phydev)
{
- int ret;
u16 val = 0;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -192,10 +185,6 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
{
int ret;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
RTL8366RB_POWER_SAVE_ON);
if (ret) {
@@ -210,11 +199,9 @@ static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
- .features = PHY_BASIC_FEATURES,
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .features = PHY_BASIC_FEATURES,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
@@ -224,14 +211,12 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
.read_mmd = &genphy_read_mmd_unsupported,
@@ -241,14 +226,12 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -256,7 +239,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -264,7 +246,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = &rtl8211f_config_init,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
@@ -275,8 +256,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc800),
.name = "Generic Realtek PHY",
- .features = PHY_GBIT_FEATURES,
- .config_init = genphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -284,7 +263,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = &rtl8366rb_config_init,
/* These interrupts are handled by the irq controller
* embedded inside the RTL8366RB, they get unmasked when the
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index 95abf7072f32..52f1f65320fe 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -104,41 +104,14 @@ static int rockchip_integrated_phy_config_init(struct phy_device *phydev)
static void rockchip_link_change_notify(struct phy_device *phydev)
{
- int speed = SPEED_10;
-
- if (phydev->autoneg == AUTONEG_ENABLE) {
- int reg = phy_read(phydev, MII_SPECIAL_CONTROL_STATUS);
-
- if (reg < 0) {
- phydev_err(phydev, "phy_read err: %d.\n", reg);
- return;
- }
-
- if (reg & MII_SPEED_100)
- speed = SPEED_100;
- else if (reg & MII_SPEED_10)
- speed = SPEED_10;
- } else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0) {
- phydev_err(phydev, "phy_read err: %d.\n", bmcr);
- return;
- }
-
- if (bmcr & BMCR_SPEED100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
- }
-
/*
* If mode switch happens from 10BT to 100BT, all DSP/AFE
* registers are set to default values. So any AFE/DSP
* registers have to be re-initialized in this case.
*/
- if ((phydev->speed == SPEED_10) && (speed == SPEED_100)) {
+ if (phydev->state == PHY_RUNNING && phydev->speed == SPEED_100) {
int ret = rockchip_integrated_phy_analog_init(phydev);
+
if (ret)
phydev_err(phydev, "rockchip_integrated_phy_analog_init err: %d.\n",
ret);
@@ -202,7 +175,7 @@ static struct phy_driver rockchip_phy_driver[] = {
.phy_id = INTERNAL_EPHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "Rockchip integrated EPHY",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.link_change_notify = rockchip_link_change_notify,
.soft_reset = genphy_soft_reset,
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c94d3bfbc772..dc3d92d340c4 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -214,7 +214,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN83C185",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -233,7 +233,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8187",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -257,7 +257,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -282,7 +282,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN911x Internal PHY",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
@@ -300,7 +300,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
@@ -326,7 +326,7 @@ static struct phy_driver smsc_phy_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8740",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.probe = smsc_phy_probe,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 5b6acf431f98..d735a01380ed 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id = STE101P_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
@@ -91,7 +91,7 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id = STE100P_PHY_ID,
.phy_id_mask = 0xffffffff,
.name = "STe100p",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 219fc7cdc2b3..a32b3fd8a370 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -87,7 +87,7 @@ static struct phy_driver upd60620_driver[1] = { {
.phy_id = UPD60620_PHY_ID,
.phy_id_mask = 0xfffffffe,
.name = "Renesas uPD60620",
- .features = PHY_BASIC_FEATURES,
+ /* PHY_BASIC_FEATURES */
.flags = 0,
.config_init = upd60620_config_init,
.read_status = upd60620_read_status,
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dc0dd87a6694..43691b1acfd9 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -61,7 +61,6 @@
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
-#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8572 0x000704d0
#define PHY_ID_VSC8601 0x00070420
#define PHY_ID_VSC7385 0x00070450
@@ -293,7 +292,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
err = phy_write(phydev, MII_VSC8244_IMASK,
(phydev->drv->phy_id == PHY_ID_VSC8234 ||
phydev->drv->phy_id == PHY_ID_VSC8244 ||
- phydev->drv->phy_id == PHY_ID_VSC8514 ||
phydev->drv->phy_id == PHY_ID_VSC8572 ||
phydev->drv->phy_id == PHY_ID_VSC8601) ?
MII_VSC8244_IMASK_MASK :
@@ -389,7 +387,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8234,
.name = "Vitesse VSC8234",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -398,16 +396,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8244,
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
- .config_init = &vsc824x_config_init,
- .config_aneg = &vsc82x4_config_aneg,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
-}, {
- .phy_id = PHY_ID_VSC8514,
- .name = "Vitesse VSC8514",
- .phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -416,7 +405,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8572,
.name = "Vitesse VSC8572",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -425,7 +414,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8601,
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc8601_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -433,7 +422,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7385,
.name = "Vitesse VSC7385",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -442,7 +431,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7388,
.name = "Vitesse VSC7388",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -451,7 +440,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7395,
.name = "Vitesse VSC7395",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -460,7 +449,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC7398,
.name = "Vitesse VSC7398",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
.config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
@@ -469,7 +458,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -479,7 +468,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8221,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -488,7 +477,7 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id = PHY_ID_VSC8211,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8211",
- .features = PHY_GBIT_FEATURES,
+ /* PHY_GBIT_FEATURES */
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -499,7 +488,6 @@ module_phy_driver(vsc82xx_driver);
static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8234, 0x000ffff0 },
{ PHY_ID_VSC8244, 0x000fffc0 },
- { PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8572, 0x000ffff0 },
{ PHY_ID_VSC7385, 0x000ffff0 },
{ PHY_ID_VSC7388, 0x000ffff0 },
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 941cfa8f1c2a..627b3a4405ad 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -316,7 +316,7 @@ static int
card_send_command(const int ioaddr[], const char* name,
const unsigned char out[], unsigned char in[])
{
- int status, x;
+ int status;
if ((status = card_wait_for_busy_clear(ioaddr, name)))
return status;
@@ -345,9 +345,7 @@ card_send_command(const int ioaddr[], const char* name,
out[0], out[1], out[2], out[3], out[4], out[5]);
}
- if (out[1] == 0x1b) {
- x = (out[2] == 0x02);
- } else {
+ if (out[1] != 0x1b) {
if (out[0] >= 0x80 && in[0] != (out[1] | 0x80))
return -EIO;
}
@@ -490,14 +488,13 @@ sb1000_check_CRC(const int ioaddr[], const char* name)
static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
unsigned char st[7];
- int crc, status;
+ int status;
/* check CRC */
if ((status = card_send_command(ioaddr, name, Command0, st)))
return status;
if (st[1] != st[3] || st[2] != st[4])
return -EIO;
- crc = st[1] << 8 | st[2];
return 0;
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 16963f7a88f7..2106045b3e16 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -38,13 +38,11 @@
* Helpers
**********/
-#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
-
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
struct team_port *port = rtnl_dereference(dev->rx_handler_data);
- return team_port_exists(dev) ? port : NULL;
+ return netif_is_team_port(dev) ? port : NULL;
}
/*
@@ -1143,7 +1141,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EINVAL;
}
- if (team_port_exists(port_dev)) {
+ if (netif_is_team_port(port_dev)) {
NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
netdev_err(dev, "Device %s is already a port "
"of a team device\n", portname);
@@ -1724,8 +1722,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -2293,7 +2290,7 @@ static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
if (err)
return err;
- option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
if (!option_item)
return -EMSGSIZE;
@@ -2407,7 +2404,7 @@ start_again:
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
- option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+ option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
if (!option_list)
goto nla_put_failure;
@@ -2513,9 +2510,11 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto team_put;
}
- err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
- nl_option, team_nl_option_policy,
- info->extack);
+ err = nla_parse_nested_deprecated(opt_attrs,
+ TEAM_ATTR_OPTION_MAX,
+ nl_option,
+ team_nl_option_policy,
+ info->extack);
if (err)
goto team_put;
if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
@@ -2629,7 +2628,7 @@ static int team_nl_fill_one_port_get(struct sk_buff *skb,
{
struct nlattr *port_item;
- port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
if (!port_item)
goto nest_cancel;
if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
@@ -2684,7 +2683,7 @@ start_again:
if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
goto nla_put_failure;
- port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+ port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
if (!port_list)
goto nla_put_failure;
@@ -2758,25 +2757,25 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
static const struct genl_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_noop,
- .policy = team_nl_policy,
},
{
.cmd = TEAM_CMD_OPTIONS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_options_set,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = TEAM_CMD_OPTIONS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_options_get,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = TEAM_CMD_PORT_LIST_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = team_nl_cmd_port_list_get,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -2789,6 +2788,7 @@ static struct genl_family team_nl_family __ro_after_init = {
.name = TEAM_GENL_NAME,
.version = TEAM_GENL_VERSION,
.maxattr = TEAM_ATTR_MAX,
+ .policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = team_nl_ops,
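
The nla_policy now hangs off the genl_family, while each op only declares how strictly it wants its attributes validated. A condensed sketch of the resulting shape (all example_* names and EXAMPLE_* constants are placeholders):

	static const struct genl_ops example_ops[] = {
		{
			.cmd      = EXAMPLE_CMD_GET,
			/* keep the old, lax parsing for this op */
			.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
			.doit     = example_doit,
		},
	};

	static struct genl_family example_family __ro_after_init = {
		.name    = "example",
		.version = 1,
		.maxattr = EXAMPLE_ATTR_MAX,
		.policy  = example_policy,	/* one family-wide nla_policy[] */
		.module  = THIS_MODULE,
		.ops     = example_ops,
		.n_ops   = ARRAY_SIZE(example_ops),
	};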
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e9ca1c088d0b..9d72f8c76c15 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -606,8 +606,7 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;
@@ -1043,7 +1042,7 @@ static int tun_net_close(struct net_device *dev)
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+ if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -1966,7 +1965,8 @@ drop:
if (frags) {
/* Exercise flow dissector code path. */
- u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ u32 headlen = eth_get_headlen(tun->dev, skb->data,
+ skb_headlen(skb));
if (unlikely(headlen > skb_headlen(skb))) {
this_cpu_inc(tun->pcpu_stats->rx_dropped);
@@ -2873,8 +2873,7 @@ err_free_dev:
return err;
}
-static void tun_get_iff(struct net *net, struct tun_struct *tun,
- struct ifreq *ifr)
+static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
tun_debug(KERN_INFO, tun, "tun_get_iff\n");
@@ -3103,10 +3102,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
+ net = dev_net(tun->dev);
ret = 0;
switch (cmd) {
case TUNGETIFF:
- tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ tun_get_iff(tun, &ifr);
if (tfile->detached)
ifr.ifr_flags |= IFF_DETACH_QUEUE;
@@ -3328,6 +3328,13 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = tun_net_change_carrier(tun->dev, (bool)carrier);
break;
+ case TUNGETDEVNETNS:
+ ret = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ goto unlock;
+ ret = open_related_ns(&net->ns, get_net_ns);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -3457,7 +3464,7 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
rtnl_lock();
tun = tun_get(tfile);
if (tun)
- tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ tun_get_iff(tun, &ifr);
rtnl_unlock();
if (tun)
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 0362acd5cdca..28321aca48fe 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -23,6 +23,7 @@
#include <linux/usb/cdc_ncm.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
/* alternative VLAN for IP session 0 if not untagged */
#define MBIM_IPS0_VID 4094
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 3d8a70d3ea9b..c247aed2dceb 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -54,17 +54,6 @@
#include <linux/workqueue.h>
#define USB_VENDOR_APPLE 0x05ac
-#define USB_PRODUCT_IPHONE 0x1290
-#define USB_PRODUCT_IPHONE_3G 0x1292
-#define USB_PRODUCT_IPHONE_3GS 0x1294
-#define USB_PRODUCT_IPHONE_4 0x1297
-#define USB_PRODUCT_IPAD 0x129a
-#define USB_PRODUCT_IPAD_2 0x12a2
-#define USB_PRODUCT_IPAD_3 0x12a6
-#define USB_PRODUCT_IPAD_MINI 0x12ab
-#define USB_PRODUCT_IPHONE_4_VZW 0x129c
-#define USB_PRODUCT_IPHONE_4S 0x12a0
-#define USB_PRODUCT_IPHONE_5 0x12a8
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -88,50 +77,9 @@
#define IPHETH_CARRIER_ON 0x04
static const struct usb_device_id ipheth_table[] = {
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD_2,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD_3,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPAD_MINI,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
- { USB_DEVICE_AND_INTERFACE_INFO(
- USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_5,
- IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
- IPHETH_USBINTF_PROTO) },
+ { USB_VENDOR_AND_INTERFACE_INFO(USB_VENDOR_APPLE, IPHETH_USBINTF_CLASS,
+ IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -293,8 +241,6 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
struct usb_device *udev;
int retval;
- if (!dev)
- return 0;
if (!dev->confirmed_pairing)
return 0;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 679e404a5224..5c3ac97519b7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -63,6 +63,7 @@ enum qmi_wwan_flags {
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
+ QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
};
struct qmimux_hdr {
@@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
.data = QMI_WWAN_QUIRK_DTR,
};
+static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
+ .description = "WWAN/QMI device",
+ .flags = FLAG_WWAN | FLAG_SEND_ZLP,
+ .bind = qmi_wwan_bind,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .rx_fixup = qmi_wwan_rx_fixup,
+ .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
+};
+
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
#define QMI_GOBI_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 0)
+/* Quectel does not use fixed interface numbers on at least some of their
+ * devices. We need to check the number of endpoints to ensure that we bind to
+ * the correct interface.
+ */
+#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+ USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
+ USB_SUBCLASS_VENDOR_SPEC, 0xff), \
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+
static const struct usb_device_id products[] = {
/* 1. CDC ECM like devices match on the control interface */
{ /* Huawei E392, E398 and possibly others sharing both device id and more... */
@@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
- { /* Quectel EP06/EG06/EM06 */
- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
- USB_CLASS_VENDOR_SPEC,
- USB_SUBCLASS_VENDOR_SPEC,
- 0xff),
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
- },
- { /* Quectel EG12/EM12 */
- USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
- USB_CLASS_VENDOR_SPEC,
- USB_SUBCLASS_VENDOR_SPEC,
- 0xff),
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
- },
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+ {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1281,7 +1290,6 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
@@ -1361,27 +1369,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
return false;
}
-static bool quectel_diag_detected(struct usb_interface *intf)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
- u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
- u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
-
- if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
- return false;
-
- if (id_product == 0x0306 || id_product == 0x0512)
- return true;
- else
- return false;
-}
-
static int qmi_wwan_probe(struct usb_interface *intf,
const struct usb_device_id *prod)
{
struct usb_device_id *id = (struct usb_device_id *)prod;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
+ const struct driver_info *info;
/* Workaround to enable dynamic IDs. This disables usbnet
* blacklisting functionality. Which, if required, can be
@@ -1415,10 +1408,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* we need to match on class/subclass/protocol. These values are
* identical for the diagnostic- and QMI-interface, but bNumEndpoints is
* different. Ignore the current interface if the number of endpoints
- * the number for the diag interface (two).
+ * equals the number for the diag interface (two).
*/
- if (quectel_diag_detected(intf))
- return -ENODEV;
+ info = (void *)&id->driver_info;
+
+ if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
+ if (desc->bNumEndpoints == 2)
+ return -ENODEV;
+ }
return usbnet_probe(intf, id);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86c8c64fbb0f..b01bfa63860d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1212,7 +1212,6 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
goto amacout;
}
memcpy(sa->sa_data, buf, 6);
- ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
netif_info(tp, probe, tp->netdev,
"Using pass-thru MAC addr %pM\n", sa->sa_data);
@@ -1221,43 +1220,57 @@ amacout:
return ret;
}
-static int set_ethernet_addr(struct r8152 *tp)
+static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa)
{
struct net_device *dev = tp->netdev;
- struct sockaddr sa;
int ret;
+ sa->sa_family = dev->type;
+
if (tp->version == RTL_VER_01) {
- ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
+ ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data);
} else {
/* if device doesn't support MAC pass through this will
* be expected to be non-zero
*/
- ret = vendor_mac_passthru_addr_read(tp, &sa);
+ ret = vendor_mac_passthru_addr_read(tp, sa);
if (ret < 0)
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa->sa_data);
}
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
- } else if (!is_valid_ether_addr(sa.sa_data)) {
+ } else if (!is_valid_ether_addr(sa->sa_data)) {
netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
- sa.sa_data);
+ sa->sa_data);
eth_hw_addr_random(dev);
- ether_addr_copy(sa.sa_data, dev->dev_addr);
- ret = rtl8152_set_mac_address(dev, &sa);
+ ether_addr_copy(sa->sa_data, dev->dev_addr);
netif_info(tp, probe, dev, "Random ether addr %pM\n",
- sa.sa_data);
- } else {
- if (tp->version == RTL_VER_01)
- ether_addr_copy(dev->dev_addr, sa.sa_data);
- else
- ret = rtl8152_set_mac_address(dev, &sa);
+ sa->sa_data);
+ return 0;
}
return ret;
}
+static int set_ethernet_addr(struct r8152 *tp)
+{
+ struct net_device *dev = tp->netdev;
+ struct sockaddr sa;
+ int ret;
+
+ ret = determine_ethernet_addr(tp, &sa);
+ if (ret < 0)
+ return ret;
+
+ if (tp->version == RTL_VER_01)
+ ether_addr_copy(dev->dev_addr, sa.sa_data);
+ else
+ ret = rtl8152_set_mac_address(dev, &sa);
+
+ return ret;
+}
+
static void read_bulk_callback(struct urb *urb)
{
struct net_device *netdev;
@@ -4264,10 +4277,18 @@ static int rtl8152_post_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev;
+ struct sockaddr sa;
if (!tp)
return 0;
+	/* reset the MAC address in case of policy change */
+ if (determine_ethernet_addr(tp, &sa) >= 0) {
+ rtnl_lock();
+ dev_set_mac_address (tp->netdev, &sa, NULL);
+ rtnl_unlock();
+ }
+
netdev = tp->netdev;
if (!netif_running(netdev))
return 0;
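
The r8152 refactor above separates picking a MAC address (determine_ethernet_addr: vendor pass-through value, then the backup register, then a random fallback) from applying it, so rtl8152_post_reset() can re-run the selection and install the result via dev_set_mac_address() under rtnl_lock() when the pass-through policy changed across a reset. A compact sketch of that selection order, with stubbed readers standing in for the register accesses:

#include <stdio.h>
#include <string.h>

/* stubbed address sources; in the driver these are register/OCP reads */
static int read_passthru(unsigned char *mac)
{
	(void)mac;
	return -1;			/* pretend pass-through is unsupported */
}

static int read_backup(unsigned char *mac)
{
	static const unsigned char b[6] = { 0x00, 0xe0, 0x4c, 0x12, 0x34, 0x56 };

	memcpy(mac, b, 6);
	return 0;
}

static void random_addr(unsigned char *mac)
{
	static const unsigned char r[6] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

	memcpy(mac, r, 6);		/* locally administered fallback */
}

/* selection only: try pass-through, then the backup copy, then random;
 * installing the address on the netdev is a separate step
 */
static void determine_addr(unsigned char *mac)
{
	if (read_passthru(mac) == 0)
		return;
	if (read_backup(mac) == 0)
		return;
	random_addr(mac);
}

int main(void)
{
	unsigned char mac[6];

	determine_addr(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}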
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index ec287c9741e8..e4c2f3afce60 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -774,8 +774,8 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
/* maybe the boot loader passed the MAC address in devicetree */
mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (mac_addr) {
- memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(dev->net->dev_addr, mac_addr);
return;
}
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index e3d08626828e..a0e119907c84 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -917,8 +917,8 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
/* maybe the boot loader passed the MAC address in devicetree */
mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (mac_addr) {
- memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ if (!IS_ERR(mac_addr)) {
+ ether_addr_copy(dev->net->dev_addr, mac_addr);
return;
}
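
The smsc75xx and smsc95xx hunks above (and the ath9k one further down) adapt callers to the new of_get_mac_address() contract: the helper now returns an ERR_PTR-encoded error instead of NULL, so success is checked with IS_ERR() before the address is copied. A standalone sketch of the idiom; ERR_PTR()/IS_ERR() here are simplified userspace stand-ins for the kernel macros, and get_dt_mac() is invented for the example.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

/* simplified userspace stand-ins for the kernel's ERR_PTR helpers */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* pretend devicetree lookup: returns the address or an encoded error */
static const unsigned char *get_dt_mac(int present)
{
	static const unsigned char mac[6] = { 0x52, 0x54, 0x00, 0x11, 0x22, 0x33 };

	return present ? mac : (const unsigned char *)ERR_PTR(-ENODEV);
}

int main(void)
{
	const unsigned char *mac = get_dt_mac(0);
	unsigned char dev_addr[6] = { 0 };

	if (!IS_ERR(mac))		/* a NULL check is no longer enough */
		memcpy(dev_addr, mac, 6);
	else
		printf("no MAC in devicetree, fall back to EEPROM/random\n");
	return 0;
}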
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 569e87a51a33..09a1433b0833 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -162,18 +162,6 @@ static void veth_get_ethtool_stats(struct net_device *dev,
}
}
-static int veth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
- info->phc_index = -1;
-
- return 0;
-}
-
static const struct ethtool_ops veth_ethtool_ops = {
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -181,7 +169,7 @@ static const struct ethtool_ops veth_ethtool_ops = {
.get_sset_count = veth_get_sset_count,
.get_ethtool_stats = veth_get_ethtool_stats,
.get_link_ksettings = veth_get_link_ksettings,
- .get_ts_info = veth_get_ts_info,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* general routines */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7eb38ea9ba56..559c48e66afc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -31,7 +31,6 @@
#include <linux/average.h>
#include <linux/filter.h>
#include <linux/kernel.h>
-#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
#include <net/net_failover.h>
@@ -1568,7 +1567,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct send_queue *sq = &vi->sq[qnum];
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
- bool kick = !skb->xmit_more;
+ bool kick = !netdev_xmit_more();
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
@@ -1588,7 +1587,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
- "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
+ "Unexpected TXQ (%d) queue failure: %d\n",
+ qnum, err);
dev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -1925,7 +1925,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
+static void virtnet_clean_affinity(struct virtnet_info *vi)
{
int i;
@@ -1949,7 +1949,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
int stride;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
- virtnet_clean_affinity(vi, -1);
+ virtnet_clean_affinity(vi);
return;
}
@@ -1999,7 +1999,7 @@ static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
node);
- virtnet_clean_affinity(vi, cpu);
+ virtnet_clean_affinity(vi);
return 0;
}
@@ -2384,7 +2384,7 @@ static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
- dev_warn(&vi->dev->dev, "Fail to set guest offload. \n");
+ dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
return -EINVAL;
}
@@ -2735,7 +2735,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_clean_affinity(vi, -1);
+ virtnet_clean_affinity(vi);
vdev->config->del_vqs(vdev);
@@ -3115,8 +3115,9 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Should never trigger: MTU was previously validated
* in virtnet_validate.
*/
- dev_err(&vdev->dev, "device MTU appears to have changed "
- "it is now %d < %d", mtu, dev->min_mtu);
+ dev_err(&vdev->dev,
+ "device MTU appears to have changed it is now %d < %d",
+ mtu, dev->min_mtu);
goto free;
}
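
The start_xmit() change above swaps skb->xmit_more for netdev_xmit_more(), following the move of the "more packets are coming" hint from the skb into a per-cpu variable; the batching rule itself is unchanged: the virtqueue is only kicked when no further packets are queued behind the current one. A toy sketch of that doorbell batching, with kick() as a stand-in for virtqueue_kick():

#include <stdio.h>

static int kicks;			/* counts doorbell writes */
static void kick(void) { kicks++; }	/* stand-in for virtqueue_kick() */

/* queue one packet; only ring the doorbell when the stack says this is
 * the last packet of the burst (xmit_more == 0)
 */
static void xmit_one(int pkt, int xmit_more)
{
	printf("queued packet %d\n", pkt);
	if (!xmit_more)
		kick();
}

int main(void)
{
	/* a burst of three packets produces a single kick */
	xmit_one(1, 1);
	xmit_one(2, 1);
	xmit_one(3, 0);
	printf("doorbell kicks: %d\n", kicks);	/* prints 1 */
	return 0;
}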
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 9ee4d7402ca2..cf7e6a92e73c 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -370,7 +370,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
sock_confirm_neigh(skb, neigh);
- ret = neigh_output(neigh, skb);
+ ret = neigh_output(neigh, skb, false);
rcu_read_unlock_bh();
return ret;
}
@@ -549,7 +549,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
- u32 nexthop;
+ bool is_v6gw = false;
int ret = -EINVAL;
nf_reset(skb);
@@ -572,13 +572,11 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
rcu_read_lock_bh();
- nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
- neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
- if (unlikely(!neigh))
- neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
+ neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
if (!IS_ERR(neigh)) {
sock_confirm_neigh(skb, neigh);
- ret = neigh_output(neigh, skb);
+ /* if crossing protocols, can not use the cached header */
+ ret = neigh_output(neigh, skb, is_v6gw);
rcu_read_unlock_bh();
return ret;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d76dfed8d9bb..5994d5415a03 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -20,6 +20,7 @@
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index ef298d8525c5..4fe7c7e132c4 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -352,6 +352,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
case I2400M_SS_IDLE:
d_printf(1, dev, "entering BS-negotiated idle mode\n");
+ /* Fall through */
case I2400M_SS_DISCONNECTING:
case I2400M_SS_DATA_PATH_CONNECTED:
wimax_state_change(wimax_dev, WIMAX_ST_CONNECTED);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index f20886ade1cc..ebd64e083726 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -640,8 +640,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
* figure out where the next TX message starts (and where the
* offset to the moved header is).
*/
- hdr_size = sizeof(*tx_msg)
- + le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]);
+ hdr_size = struct_size(tx_msg, pld, le16_to_cpu(tx_msg->num_pls));
hdr_size = ALIGN(hdr_size, I2400M_PL_ALIGN);
tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size;
tx_msg_moved = (void *) tx_msg + tx_msg->offset;
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 6433ff10d80e..a29cfb9c72c2 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -416,8 +416,8 @@ int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
int ret;
- ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_policy,
- NULL);
+ ret = nla_parse_deprecated(tb, ATH10K_TM_ATTR_MAX, data, len,
+ ath10k_tm_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index d8dcacda9add..f3906dbe5495 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -74,8 +74,8 @@ int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
int err, buf_len;
void *buf;
- err = nla_parse(tb, ATH6KL_TM_ATTR_MAX, data, len, ath6kl_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, ATH6KL_TM_ATTR_MAX, data, len,
+ ath6kl_tm_policy, NULL);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 98141b699c88..a04d8616fe09 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -642,7 +642,7 @@ static int ath9k_of_init(struct ath_softc *sc)
}
mac = of_get_mac_address(np);
- if (mac)
+ if (!IS_ERR(mac))
ether_addr_copy(common->macaddr, mac);
return 0;
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index 51a038022c8b..7ae14b4d2d0e 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -132,8 +132,8 @@ int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned short attr;
wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "Data:", data, len);
- ret = nla_parse(tb, WCN36XX_TM_ATTR_MAX, data, len,
- wcn36xx_tm_policy, NULL);
+ ret = nla_parse_deprecated(tb, WCN36XX_TM_ATTR_MAX, data, len,
+ wcn36xx_tm_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index a1e226652b4a..804955d24b30 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -465,7 +465,7 @@ static int wil_cfg80211_validate_add_iface(struct wil6210_priv *wil,
.num_different_channels = 1,
};
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (wil->vifs[i]) {
wdev = vif_to_wdev(wil->vifs[i]);
params.iftype_num[wdev->iftype]++;
@@ -486,7 +486,7 @@ static int wil_cfg80211_validate_change_iface(struct wil6210_priv *wil,
};
bool check_combos = false;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif_pos = wil->vifs[i];
if (vif_pos && vif != vif_pos) {
@@ -1274,7 +1274,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
params->wait);
out:
+	/* when the sent packet was not acked by the receiver (ACK=0), rc will
+	 * be -EAGAIN. In this case this function needs to return success;
+	 * the ACK=0 will be reflected in tx_status.
+	 */
tx_status = (rc == 0);
+ rc = (rc == -EAGAIN) ? 0 : rc;
cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
tx_status, GFP_KERNEL);
@@ -1806,7 +1811,7 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
int rc, i;
struct wiphy *wiphy = wil_to_wiphy(wil);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
struct net_device *ndev;
struct cfg80211_beacon_data bcon = {};
@@ -2620,8 +2625,8 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
@@ -2679,13 +2684,13 @@ static int wil_rf_sector_get_cfg(struct wiphy *wiphy,
QCA_ATTR_PAD))
goto nla_put_failure;
- nl_cfgs = nla_nest_start(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
+ nl_cfgs = nla_nest_start_noflag(msg, QCA_ATTR_DMG_RF_SECTOR_CFG);
if (!nl_cfgs)
goto nla_put_failure;
for (i = 0; i < WMI_MAX_RF_MODULES_NUM; i++) {
if (!(rf_modules_vec & BIT(i)))
continue;
- nl_cfg = nla_nest_start(msg, i);
+ nl_cfg = nla_nest_start_noflag(msg, i);
if (!nl_cfg)
goto nla_put_failure;
si = &reply.evt.sectors_info[i];
@@ -2740,8 +2745,8 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
@@ -2773,9 +2778,11 @@ static int wil_rf_sector_set_cfg(struct wiphy *wiphy,
cmd.sector_type = sector_type;
nla_for_each_nested(nl_cfg, tb[QCA_ATTR_DMG_RF_SECTOR_CFG],
tmp) {
- rc = nla_parse_nested(tb2, QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
- nl_cfg, wil_rf_sector_cfg_policy,
- NULL);
+ rc = nla_parse_nested_deprecated(tb2,
+ QCA_ATTR_DMG_RF_SECTOR_CFG_MAX,
+ nl_cfg,
+ wil_rf_sector_cfg_policy,
+ NULL);
if (rc) {
wil_err(wil, "invalid sector cfg\n");
return -EINVAL;
@@ -2847,8 +2854,8 @@ static int wil_rf_sector_get_selected(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
@@ -2955,8 +2962,8 @@ static int wil_rf_sector_set_selected(struct wiphy *wiphy,
if (!test_bit(WMI_FW_CAPABILITY_RF_SECTORS, wil->fw_capabilities))
return -EOPNOTSUPP;
- rc = nla_parse(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data, data_len,
- wil_rf_sector_policy, NULL);
+ rc = nla_parse_deprecated(tb, QCA_ATTR_DMG_RF_SECTOR_MAX, data,
+ data_len, wil_rf_sector_policy, NULL);
if (rc) {
wil_err(wil, "Invalid rf sector ATTR\n");
return rc;
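
In wil_cfg80211_mgmt_tx() above, a frame the peer did not acknowledge now arrives from the WMI layer as -EAGAIN instead of -EINVAL; the handler reports tx_status = false to cfg80211 but maps the return value back to 0, since a missing ACK is not a transmit failure. A one-function sketch of that mapping:

#include <stdio.h>
#include <errno.h>

struct tx_result {
	int ret;	/* value returned to the caller */
	int acked;	/* status reported to the management layer */
};

/* 0 -> sent and acked; -EAGAIN -> sent but not acked (still "success");
 * any other error -> real failure
 */
static struct tx_result map_mgmt_tx_rc(int rc)
{
	struct tx_result r;

	r.acked = (rc == 0);
	r.ret = (rc == -EAGAIN) ? 0 : rc;
	return r;
}

int main(void)
{
	int codes[] = { 0, -EAGAIN, -EIO };

	for (int i = 0; i < 3; i++) {
		struct tx_result r = map_mgmt_tx_rc(codes[i]);

		printf("rc=%d -> ret=%d acked=%d\n", codes[i], r.ret, r.acked);
	}
	return 0;
}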
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 7ad4e5328439..df2adff6c33a 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -207,6 +207,8 @@ static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil,
seq_puts(s, "???\n");
}
seq_printf(s, " desc_rdy_pol = %d\n", sring->desc_rdy_pol);
+ seq_printf(s, " invalid_buff_id_cnt = %d\n",
+ sring->invalid_buff_id_cnt);
if (sring->va && (sring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) {
uint i;
@@ -258,6 +260,11 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
wil_halp_vote(wil);
+ if (wil_mem_access_lock(wil)) {
+ wil_halp_unvote(wil);
+ return;
+ }
+
wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
@@ -323,6 +330,7 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix,
}
out:
seq_puts(s, "}\n");
+ wil_mem_access_unlock(wil);
wil_halp_unvote(wil);
}
@@ -601,6 +609,12 @@ static int memread_show(struct seq_file *s, void *data)
if (ret < 0)
return ret;
+ ret = wil_mem_access_lock(wil);
+ if (ret) {
+ wil_pm_runtime_put(wil);
+ return ret;
+ }
+
a = wmi_buffer(wil, cpu_to_le32(mem_addr));
if (a)
@@ -608,6 +622,7 @@ static int memread_show(struct seq_file *s, void *data)
else
seq_printf(s, "[0x%08x] = INVALID\n", mem_addr);
+ wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);
return 0;
@@ -626,10 +641,6 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t unaligned_bytes, aligned_count, ret;
int rc;
- if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
- test_bit(wil_status_suspended, wil_blob->wil->status))
- return 0;
-
if (pos < 0)
return -EINVAL;
@@ -656,11 +667,19 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
return rc;
}
+ rc = wil_mem_access_lock(wil);
+ if (rc) {
+ kfree(buf);
+ wil_pm_runtime_put(wil);
+ return rc;
+ }
+
wil_memcpy_fromio_32(buf, (const void __iomem *)
wil_blob->blob.data + aligned_pos, aligned_count);
ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
+ wil_mem_access_unlock(wil);
wil_pm_runtime_put(wil);
kfree(buf);
@@ -1364,7 +1383,7 @@ static int link_show(struct seq_file *s, void *data)
if (p->status != wil_sta_connected)
continue;
- vif = (mid < wil->max_vifs) ? wil->vifs[mid] : NULL;
+ vif = (mid < GET_MAX_VIFS(wil)) ? wil->vifs[mid] : NULL;
if (vif) {
rc = wil_cid_fill_sinfo(vif, i, sinfo);
if (rc)
@@ -1562,7 +1581,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
}
mid = (p->status != wil_sta_unused) ? p->mid : U8_MAX;
- if (mid < wil->max_vifs) {
+ if (mid < GET_MAX_VIFS(wil)) {
struct wil6210_vif *vif = wil->vifs[mid];
if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
@@ -1628,7 +1647,7 @@ static int mids_show(struct seq_file *s, void *data)
int i;
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
@@ -1849,7 +1868,7 @@ static int wil_link_stats_debugfs_show(struct seq_file *s, void *data)
/* iterate over all MIDs and show per-cid statistics. Then show the
* global statistics
*/
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
seq_printf(s, "MID %d ", i);
@@ -1905,7 +1924,7 @@ static ssize_t wil_link_stats_write(struct file *file, const char __user *buf,
if (rc)
return rc;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
@@ -2375,6 +2394,7 @@ static const struct dbg_off dbg_wil_regs[] = {
{"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
{"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {"RGF_USER_USAGE_2", 0444, HOSTADDR(RGF_USER_USAGE_2), doff_io32},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index 388b3d4717ca..3ec0f2fab9b7 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -647,6 +647,8 @@ int wil_request_firmware(struct wil6210_priv *wil, const char *name,
out:
release_firmware(fw);
+ if (rc)
+ wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
return rc;
}
@@ -741,6 +743,8 @@ int wil_request_board(struct wil6210_priv *wil, const char *name)
out:
release_firmware(brd);
+ if (rc)
+ wil_err_fw(wil, "Loading <%s> failed, rc %d\n", name, rc);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 277abfdf3322..9b9c9ec01536 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -184,6 +184,28 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
}
}
+/* Device memory access is prohibited during reset or suspend.
+ * wil_mem_access_lock protects device memory access in these cases.
+ */
+int wil_mem_access_lock(struct wil6210_priv *wil)
+{
+ if (!down_read_trylock(&wil->mem_lock))
+ return -EBUSY;
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ up_read(&wil->mem_lock);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void wil_mem_access_unlock(struct wil6210_priv *wil)
+{
+ up_read(&wil->mem_lock);
+}
+
static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
{
struct wil_ring *ring = &wil->ring_tx[id];
@@ -663,7 +685,7 @@ void wil_bcast_fini_all(struct wil6210_priv *wil)
int i;
struct wil6210_vif *vif;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif)
wil_bcast_fini(vif);
@@ -703,6 +725,7 @@ int wil_priv_init(struct wil6210_priv *wil)
spin_lock_init(&wil->wmi_ev_lock);
spin_lock_init(&wil->net_queue_lock);
init_waitqueue_head(&wil->wq);
+ init_rwsem(&wil->mem_lock);
wil->wmi_wq = create_singlethread_workqueue(WIL_NAME "_wmi");
if (!wil->wmi_wq)
@@ -1390,13 +1413,22 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
u8 mac[8];
int mac_addr;
- if (wil->hw_version >= HW_VER_TALYN_MB)
- mac_addr = RGF_OTP_MAC_TALYN_MB;
- else
- mac_addr = RGF_OTP_MAC;
+ /* OEM MAC has precedence */
+ mac_addr = RGF_OTP_OEM_MAC;
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr), sizeof(mac));
+
+ if (is_valid_ether_addr(mac)) {
+ wil_info(wil, "using OEM MAC %pM\n", mac);
+ } else {
+ if (wil->hw_version >= HW_VER_TALYN_MB)
+ mac_addr = RGF_OTP_MAC_TALYN_MB;
+ else
+ mac_addr = RGF_OTP_MAC;
+
+ wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
+ sizeof(mac));
+ }
- wil_memcpy_fromio_32(mac, wil->csr + HOSTADDR(mac_addr),
- sizeof(mac));
if (!is_valid_ether_addr(mac)) {
wil_err(wil, "Invalid MAC %pM\n", mac);
return -EINVAL;
@@ -1460,7 +1492,7 @@ void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync)
lockdep_assert_held(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif)
@@ -1500,11 +1532,6 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
if (wil->hw_version < HW_VER_TALYN_MB) {
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
- } else {
- wil_s(wil,
- RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
- wil_w(wil, RGF_CAF_ICR_TALYN_MB +
- offsetof(struct RGF_ICR, IMV), ~0);
}
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
* In Talyn-MB host cannot access this register due to
@@ -1528,7 +1555,7 @@ static int wil_restore_vifs(struct wil6210_priv *wil)
struct wireless_dev *wdev;
int i, rc;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (!vif)
continue;
@@ -1580,7 +1607,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
- if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
+ if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa) &&
+ wil->hw_version < HW_VER_TALYN_MB) {
wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
}
@@ -1599,20 +1627,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
set_bit(wil_status_resetting, wil->status);
- if (test_bit(wil_status_collecting_dumps, wil->status)) {
- /* Device collects crash dump, cancel the reset.
- * following crash dump collection, reset would take place.
- */
- wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
- rc = -EBUSY;
- goto out;
- }
-
mutex_lock(&wil->vif_mutex);
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
cancel_work_sync(&vif->disconnect_worker);
@@ -1782,7 +1801,9 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
+ down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
+ up_write(&wil->mem_lock);
if (rc)
return rc;
@@ -1854,6 +1875,7 @@ int wil_up(struct wil6210_priv *wil)
int __wil_down(struct wil6210_priv *wil)
{
+ int rc;
WARN_ON(!mutex_is_locked(&wil->mutex));
set_bit(wil_status_resetting, wil->status);
@@ -1873,7 +1895,11 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- return wil_reset(wil, false);
+ down_write(&wil->mem_lock);
+ rc = wil_reset(wil, false);
+ up_write(&wil->mem_lock);
+
+ return rc;
}
int wil_down(struct wil6210_priv *wil)
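
The wil6210 main.c changes above replace the collecting_dumps status bit with a reader/writer discipline around device memory: paths that read device memory take mem_lock for read via down_read_trylock() and return -EBUSY when a reset holds it for write or the suspend bits are already set, while __wil_up()/__wil_down() (and, further down, the suspend paths) take it for write around wil_reset(). A userspace sketch of the same pattern on a pthread rwlock; the function names are chosen for illustration only.

#include <pthread.h>
#include <stdio.h>
#include <errno.h>

static pthread_rwlock_t mem_lock = PTHREAD_RWLOCK_INITIALIZER;
static int suspending;			/* set while suspend is in flight */

/* reader side: device-memory users try the lock and bail out when busy */
static int mem_access_lock(void)
{
	if (pthread_rwlock_tryrdlock(&mem_lock))
		return -EBUSY;
	if (suspending) {
		pthread_rwlock_unlock(&mem_lock);
		return -EBUSY;
	}
	return 0;
}

static void mem_access_unlock(void)
{
	pthread_rwlock_unlock(&mem_lock);
}

/* writer side: reset/suspend exclude all memory users for their duration */
static void do_reset(void)
{
	pthread_rwlock_wrlock(&mem_lock);
	printf("resetting device, memory users locked out\n");
	pthread_rwlock_unlock(&mem_lock);
}

int main(void)
{
	if (!mem_access_lock()) {
		printf("reading device memory\n");
		mem_access_unlock();
	}
	do_reset();
	return 0;
}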
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index b4e0eb1585b9..59f041d708fe 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@ bool wil_has_other_active_ifaces(struct wil6210_priv *wil,
struct wil6210_vif *vif;
struct net_device *ndev_i;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
ndev_i = vif_to_ndev(vif);
@@ -155,7 +155,7 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget)
struct wil6210_vif *vif;
if (!ring->va || !txdata->enabled ||
- txdata->mid >= wil->max_vifs)
+ txdata->mid >= GET_MAX_VIFS(wil))
continue;
vif = wil->vifs[txdata->mid];
@@ -294,7 +294,7 @@ static u8 wil_vif_find_free_mid(struct wil6210_priv *wil)
{
u8 i;
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
if (!wil->vifs[i])
return i;
}
@@ -500,7 +500,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
bool any_active = wil_has_active_ifaces(wil, true, false);
ASSERT_RTNL();
- if (mid >= wil->max_vifs) {
+ if (mid >= GET_MAX_VIFS(wil)) {
wil_err(wil, "invalid MID: %d\n", mid);
return;
}
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index c8c6613371d1..3b82d6cfc218 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -176,7 +176,7 @@ static void wil_remove_all_additional_vifs(struct wil6210_priv *wil)
struct wil6210_vif *vif;
int i;
- for (i = 1; i < wil->max_vifs; i++) {
+ for (i = 1; i < GET_MAX_VIFS(wil); i++) {
vif = wil->vifs[i];
if (vif) {
wil_vif_prepare_stop(vif);
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 75fe9323547c..56143e7670ed 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -26,7 +26,7 @@ static void wil_pm_wake_connected_net_queues(struct wil6210_priv *wil)
int i;
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif && test_bit(wil_vif_fwconnected, vif->status))
@@ -40,7 +40,7 @@ static void wil_pm_stop_all_net_queues(struct wil6210_priv *wil)
int i;
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (vif)
@@ -123,7 +123,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
/* interface is running */
mutex_lock(&wil->vif_mutex);
- for (i = 0; i < wil->max_vifs; i++) {
+ for (i = 0; i < GET_MAX_VIFS(wil); i++) {
struct wil6210_vif *vif = wil->vifs[i];
if (!vif)
@@ -195,14 +195,18 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend keep radio on\n");
/* Prevent handling of new tx and wmi commands */
- set_bit(wil_status_suspending, wil->status);
- if (test_bit(wil_status_collecting_dumps, wil->status)) {
- /* Device collects crash dump, cancel the suspend */
- wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
- clear_bit(wil_status_suspending, wil->status);
+ rc = down_write_trylock(&wil->mem_lock);
+ if (!rc) {
+ wil_err(wil,
+ "device is busy. down_write_trylock failed, returned (0x%x)\n",
+ rc);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
+
+ set_bit(wil_status_suspending, wil->status);
+ up_write(&wil->mem_lock);
+
wil_pm_stop_all_net_queues(wil);
if (!wil_is_tx_idle(wil)) {
@@ -310,15 +314,18 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend radio off\n");
- set_bit(wil_status_suspending, wil->status);
- if (test_bit(wil_status_collecting_dumps, wil->status)) {
- /* Device collects crash dump, cancel the suspend */
- wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
- clear_bit(wil_status_suspending, wil->status);
+ rc = down_write_trylock(&wil->mem_lock);
+ if (!rc) {
+ wil_err(wil,
+ "device is busy. down_write_trylock failed, returned (0x%x)\n",
+ rc);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
+ set_bit(wil_status_suspending, wil->status);
+ up_write(&wil->mem_lock);
+
/* if netif up, hardware is alive, shut it down */
mutex_lock(&wil->vif_mutex);
active_ifaces = wil_has_active_ifaces(wil, true, false);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index c38773878ae3..f6fce6ff73d9 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -29,6 +29,7 @@
#define WIL_EDMA_MAX_DATA_OFFSET (2)
/* RX buffer size must be aligned to 4 bytes */
#define WIL_EDMA_RX_BUF_LEN_DEFAULT (2048)
+#define MAX_INVALID_BUFF_ID_RETRY (3)
static void wil_tx_desc_unmap_edma(struct device *dev,
union wil_tx_desc *desc,
@@ -312,7 +313,8 @@ static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
struct list_head *free = &wil->rx_buff_mgmt.free;
int i;
- wil->rx_buff_mgmt.buff_arr = kcalloc(size, sizeof(struct wil_rx_buff),
+ wil->rx_buff_mgmt.buff_arr = kcalloc(size + 1,
+ sizeof(struct wil_rx_buff),
GFP_KERNEL);
if (!wil->rx_buff_mgmt.buff_arr)
return -ENOMEM;
@@ -321,14 +323,16 @@ static int wil_init_rx_buff_arr(struct wil6210_priv *wil,
INIT_LIST_HEAD(active);
INIT_LIST_HEAD(free);
- /* Linkify the list */
+ /* Linkify the list.
+ * buffer id 0 should not be used (marks invalid id).
+ */
buff_arr = wil->rx_buff_mgmt.buff_arr;
- for (i = 0; i < size; i++) {
+ for (i = 1; i <= size; i++) {
list_add(&buff_arr[i].list, free);
buff_arr[i].id = i;
}
- wil->rx_buff_mgmt.size = size;
+ wil->rx_buff_mgmt.size = size + 1;
return 0;
}
@@ -428,6 +432,9 @@ static void wil_ring_free_edma(struct wil6210_priv *wil, struct wil_ring *ring)
&ring->pa, ring->ctx);
wil_move_all_rx_buff_to_free_list(wil, ring);
+ dma_free_coherent(dev, sizeof(*ring->edma_rx_swtail.va),
+ ring->edma_rx_swtail.va,
+ ring->edma_rx_swtail.pa);
goto out;
}
@@ -804,18 +811,9 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
struct sk_buff *skb,
struct wil_net_stats *stats)
{
- int error;
int l2_rx_status;
- int l3_rx_status;
- int l4_rx_status;
void *msg = wil_skb_rxstatus(skb);
- error = wil_rx_status_get_error(msg);
- if (!error) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return 0;
- }
-
l2_rx_status = wil_rx_status_get_l2_rx_status(msg);
if (l2_rx_status != 0) {
wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
@@ -844,17 +842,7 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
return -EFAULT;
}
- l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
- l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
- if (!l3_rx_status && !l4_rx_status)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* If HW reports bad checksum, let IP stack re-check it
- * For example, HW don't understand Microsoft IP stack that
- * mis-calculates TCP checksum - if it should be 0x0,
- * it writes 0xffff in violation of RFC 1624
- */
- else
- stats->rx_csum_err++;
+ skb->ip_summed = wil_rx_status_get_checksum(msg, stats);
return 0;
}
@@ -892,26 +880,50 @@ again:
/* Extract the buffer ID from the status message */
buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
- if (unlikely(!wil_val_in_range(buff_id, 0, wil->rx_buff_mgmt.size))) {
+
+ while (!buff_id) {
+ struct wil_rx_status_extended *s;
+ int invalid_buff_id_retry = 0;
+
+ wil_dbg_txrx(wil,
+ "buff_id is not updated yet by HW, (swhead 0x%x)\n",
+ sring->swhead);
+ if (++invalid_buff_id_retry > MAX_INVALID_BUFF_ID_RETRY)
+ break;
+
+ /* Read the status message again */
+ s = (struct wil_rx_status_extended *)
+ (sring->va + (sring->elem_size * sring->swhead));
+ *(struct wil_rx_status_extended *)msg = *s;
+ buff_id = le16_to_cpu(wil_rx_status_get_buff_id(msg));
+ }
+
+ if (unlikely(!wil_val_in_range(buff_id, 1, wil->rx_buff_mgmt.size))) {
wil_err(wil, "Corrupt buff_id=%d, sring->swhead=%d\n",
buff_id, sring->swhead);
+ wil_rx_status_reset_buff_id(sring);
wil_sring_advance_swhead(sring);
+ sring->invalid_buff_id_cnt++;
goto again;
}
- wil_sring_advance_swhead(sring);
-
/* Extract the SKB from the rx_buff management array */
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
if (!skb) {
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
+ wil_rx_status_reset_buff_id(sring);
/* Move the buffer from the active list to the free list */
- list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
- &wil->rx_buff_mgmt.free);
+ list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
+ wil_sring_advance_swhead(sring);
+ sring->invalid_buff_id_cnt++;
goto again;
}
+ wil_rx_status_reset_buff_id(sring);
+ wil_sring_advance_swhead(sring);
+
memcpy(&pa, skb->cb, sizeof(pa));
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
dmalen = le16_to_cpu(wil_rx_status_get_length(msg));
@@ -926,8 +938,8 @@ again:
sizeof(struct wil_rx_status_extended), false);
/* Move the buffer from the active list to the free list */
- list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
- &wil->rx_buff_mgmt.free);
+ list_move_tail(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
+ &wil->rx_buff_mgmt.free);
eop = wil_rx_status_get_eop(msg);
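
The RX status-ring changes above reserve buffer id 0 as a "hardware has not written this entry yet" marker: the buffer array is allocated with one extra slot so valid ids run from 1 to size, a zero id is re-read a few times, and anything outside that range is counted, reset on the ring, and skipped. A small sketch of the id-validation rule, with an assumed array size:

#include <stdio.h>

#define RX_BUFF_ARR_SIZE	8	/* assumed array size: ids 1..7 valid */

/* id 0 means "hardware has not filled this status entry yet";
 * anything >= the array size is corrupt
 */
static int buff_id_valid(unsigned int id)
{
	return id >= 1 && id < RX_BUFF_ARR_SIZE;
}

int main(void)
{
	unsigned int ids[] = { 0, 1, 7, 8 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
		printf("buff_id %u valid: %d\n", ids[i], buff_id_valid(ids[i]));
	return 0;
}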
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index 343516a03a1e..bb4ff28b73e5 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016,2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2016,2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -427,6 +427,12 @@ static inline int wil_rx_status_get_eop(void *msg) /* EoP = End of Packet */
30, 30);
}
+static inline void wil_rx_status_reset_buff_id(struct wil_status_ring *s)
+{
+ ((struct wil_rx_status_compressed *)
+ (s->va + (s->elem_size * s->swhead)))->buff_id = 0;
+}
+
static inline __le16 wil_rx_status_get_buff_id(void *msg)
{
return ((struct wil_rx_status_compressed *)msg)->buff_id;
@@ -511,6 +517,45 @@ static inline int wil_rx_status_get_l4_rx_status(void *msg)
5, 6);
}
+/* L4 L3 Expected result
+ * 0 0 Ok. No L3 and no L4 known protocols found.
+ * Treated as L2 packet. (no offloads on this packet)
+ * 0 1 Ok. It means that L3 was found, and checksum check passed.
+ * No known L4 protocol was found.
+ * 0 2 It means that L3 protocol was found, and checksum check failed.
+ * No L4 known protocol was found.
+ * 1 any Ok. It means that L4 was found, and checksum check passed.
+ * 3 0 Not a possible scenario.
+ * 3 1 Recalculate. It means that L3 protocol was found, and checksum
+ * passed. But L4 checksum failed. Need to see if really failed,
+ * or due to fragmentation.
+ * 3 2 Both L3 and L4 checksum check failed.
+ */
+static inline int wil_rx_status_get_checksum(void *msg,
+ struct wil_net_stats *stats)
+{
+ int l3_rx_status = wil_rx_status_get_l3_rx_status(msg);
+ int l4_rx_status = wil_rx_status_get_l4_rx_status(msg);
+
+ if (l4_rx_status == 1)
+ return CHECKSUM_UNNECESSARY;
+
+ if (l4_rx_status == 0 && l3_rx_status == 1)
+ return CHECKSUM_UNNECESSARY;
+
+ if (l3_rx_status == 0 && l4_rx_status == 0)
+ /* L2 packet */
+ return CHECKSUM_NONE;
+
+ /* If HW reports bad checksum, let IP stack re-check it
+ * For example, HW doesn't understand Microsoft IP stack that
+ * mis-calculates TCP checksum - if it should be 0x0,
+ * it writes 0xffff in violation of RFC 1624
+ */
+ stats->rx_csum_err++;
+ return CHECKSUM_NONE;
+}
+
static inline int wil_rx_status_get_security(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index e1b1039b13ab..8724d9975606 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -185,6 +185,7 @@ struct RGF_ICR {
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
+#define RGF_USER_USAGE_2 (0x880008)
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
#define BIT_USER_OOB_R2_MODE BIT(30)
@@ -367,6 +368,7 @@ struct RGF_ICR {
#define REVISION_ID_SPARROW_D0 (0x3)
#define RGF_OTP_MAC_TALYN_MB (0x8a0304)
+#define RGF_OTP_OEM_MAC (0x8a0334)
#define RGF_OTP_MAC (0x8a0620)
/* Talyn-MB */
@@ -566,10 +568,11 @@ struct wil_status_ring {
bool is_rx;
u8 desc_rdy_pol; /* Expected descriptor ready bit polarity */
struct wil_ring_rx_data rx_data;
+ u32 invalid_buff_id_cnt; /* relevant only for RX */
};
#define WIL_STA_TID_NUM (16)
-#define WIL_MCS_MAX (12) /* Maximum MCS supported */
+#define WIL_MCS_MAX (15) /* Maximum MCS supported */
struct wil_net_stats {
unsigned long rx_packets;
@@ -660,7 +663,6 @@ enum { /* for wil6210_priv.status */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
- wil_status_collecting_dumps, /* crashdump collection in progress */
wil_status_last /* keep last */
};
@@ -992,6 +994,8 @@ struct wil6210_priv {
struct wil_txrx_ops txrx_ops;
struct mutex mutex; /* for wil6210_priv access in wil_{up|down} */
+ /* for synchronizing device memory access while reset or suspend */
+ struct rw_semaphore mem_lock;
/* statistics */
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
@@ -1060,6 +1064,7 @@ struct wil6210_priv {
#define vif_to_wil(v) (v->wil)
#define vif_to_ndev(v) (v->ndev)
#define vif_to_wdev(v) (&v->wdev)
+#define GET_MAX_VIFS(wil) min_t(int, (wil)->max_vifs, WIL_MAX_VIFS)
static inline struct wil6210_vif *wdev_to_vif(struct wil6210_priv *wil,
struct wireless_dev *wdev)
@@ -1176,6 +1181,8 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
+int wil_mem_access_lock(struct wil6210_priv *wil);
+void wil_mem_access_unlock(struct wil6210_priv *wil);
struct wil6210_vif *
wil_vif_alloc(struct wil6210_priv *wil, const char *name,
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index dc33a0b4c3fa..772cb00c2002 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -57,7 +57,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
- int i;
+ int i, rc;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
@@ -73,14 +73,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
- set_bit(wil_status_collecting_dumps, wil->status);
- if (test_bit(wil_status_suspending, wil->status) ||
- test_bit(wil_status_suspended, wil->status) ||
- test_bit(wil_status_resetting, wil->status)) {
- wil_err(wil, "cannot collect fw dump during suspend/reset\n");
- clear_bit(wil_status_collecting_dumps, wil->status);
- return -EINVAL;
- }
+ rc = wil_mem_access_lock(wil);
+ if (rc)
+ return rc;
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -100,8 +95,7 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
-
- clear_bit(wil_status_collecting_dumps, wil->status);
+ wil_mem_access_unlock(wil);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index bda4a9712f91..d89cd41e78ac 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -41,6 +41,7 @@ MODULE_PARM_DESC(led_id,
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
#define WIL_WMI_CALL_GENERAL_TO_MS 100
+#define WIL_WMI_PCP_STOP_TO_MS 5000
/**
* WMI event receiving - theory of operations
@@ -2195,7 +2196,8 @@ int wmi_pcp_stop(struct wil6210_vif *vif)
return rc;
return wmi_call(wil, WMI_PCP_STOP_CMDID, vif->mid, NULL, 0,
- WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
+ WMI_PCP_STOPPED_EVENTID, NULL, 0,
+ WIL_WMI_PCP_STOP_TO_MS);
}
int wmi_set_ssid(struct wil6210_vif *vif, u8 ssid_len, const void *ssid)
@@ -2957,6 +2959,10 @@ static const char *suspend_status2name(u8 status)
switch (status) {
case WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE:
return "LINK_NOT_IDLE";
+ case WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT:
+ return "DISCONNECT";
+ case WMI_TRAFFIC_SUSPEND_REJECTED_OTHER:
+ return "OTHER";
default:
return "Untracked status";
}
@@ -3046,6 +3052,9 @@ static void resume_triggers2string(u32 triggers, char *string, int str_size)
if (triggers & WMI_RESUME_TRIGGER_WMI_EVT)
strlcat(string, " WMI_EVT", str_size);
+
+ if (triggers & WMI_RESUME_TRIGGER_DISCONNECT)
+ strlcat(string, " DISCONNECT", str_size);
}
int wmi_resume(struct wil6210_priv *wil)
@@ -3196,7 +3205,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
if (mid == MID_BROADCAST)
mid = 0;
- if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) {
+ if (mid >= GET_MAX_VIFS(wil)) {
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
mid);
return;
@@ -3502,8 +3511,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
- wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
- rc = -EINVAL;
+ wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
+ evt.evt.status);
+ rc = -EAGAIN;
}
kfree(cmd);
@@ -3555,9 +3565,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
- wil_err(wil, "mgmt_tx_ext failed with status %d\n",
- evt.evt.status);
- rc = -EINVAL;
+ wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
+ evt.evt.status);
+ rc = -EAGAIN;
}
kfree(cmd);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index b668758da994..da46fc8d39cf 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2006-2012 Wilocity
*
@@ -104,6 +104,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RAW_MODE = 24,
WMI_FW_CAPABILITY_TX_REQ_EXT = 25,
WMI_FW_CAPABILITY_CHANNEL_4 = 26,
+ WMI_FW_CAPABILITY_IPA = 27,
WMI_FW_CAPABILITY_MAX,
};
@@ -294,6 +295,7 @@ enum wmi_command_id {
WMI_SET_AP_SLOT_SIZE_CMDID = 0xA0F,
WMI_SET_VRING_PRIORITY_WEIGHT_CMDID = 0xA10,
WMI_SET_VRING_PRIORITY_CMDID = 0xA11,
+ WMI_RBUFCAP_CFG_CMDID = 0xA12,
WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
WMI_ABORT_SCAN_CMDID = 0xF007,
WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
@@ -979,10 +981,22 @@ enum wmi_rx_msg_type {
WMI_RX_MSG_TYPE_EXTENDED = 0x01,
};
+enum wmi_ring_add_irq_mode {
+ /* Backwards compatibility
+ * for DESC ring - interrupt disabled
+ * for STATUS ring - interrupt enabled
+ */
+ WMI_RING_ADD_IRQ_MODE_BWC = 0x00,
+ WMI_RING_ADD_IRQ_MODE_DISABLE = 0x01,
+ WMI_RING_ADD_IRQ_MODE_ENABLE = 0x02,
+};
+
struct wmi_tx_status_ring_add_cmd {
struct wmi_edma_ring_cfg ring_cfg;
u8 irq_index;
- u8 reserved[3];
+ /* wmi_ring_add_irq_mode */
+ u8 irq_mode;
+ u8 reserved[2];
} __packed;
struct wmi_rx_status_ring_add_cmd {
@@ -1016,7 +1030,10 @@ struct wmi_tx_desc_ring_add_cmd {
u8 mac_ctrl;
u8 to_resolution;
u8 agg_max_wsize;
- u8 reserved[3];
+ u8 irq_index;
+ /* wmi_ring_add_irq_mode */
+ u8 irq_mode;
+ u8 reserved;
struct wmi_vring_cfg_schd schd_params;
} __packed;
@@ -1982,6 +1999,7 @@ enum wmi_event_id {
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
+ WMI_BF_TRIG_EVENTID = 0x183A,
WMI_RS_MGMT_DONE_EVENTID = 0x1852,
WMI_RF_MGMT_STATUS_EVENTID = 0x1853,
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
@@ -2082,6 +2100,7 @@ enum wmi_event_id {
WMI_SET_AP_SLOT_SIZE_EVENTID = 0x1A0F,
WMI_SET_VRING_PRIORITY_WEIGHT_EVENTID = 0x1A10,
WMI_SET_VRING_PRIORITY_EVENTID = 0x1A11,
+ WMI_RBUFCAP_CFG_EVENTID = 0x1A12,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -2267,7 +2286,9 @@ struct wmi_notify_req_done_event {
__le32 status;
__le64 tsf;
s8 rssi;
- u8 reserved0[3];
+ /* enum wmi_edmg_tx_mode */
+ u8 tx_mode;
+ u8 reserved0[2];
__le32 tx_tpt;
__le32 tx_goodput;
__le32 rx_goodput;
@@ -2316,6 +2337,7 @@ enum wmi_disconnect_reason {
WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
WMI_DIS_REASON_IBSS_MERGE = 0x0E,
+ WMI_DIS_REASON_HIGH_TEMPERATURE = 0x0F,
};
/* WMI_DISCONNECT_EVENTID */
@@ -3168,6 +3190,30 @@ struct wmi_brp_set_ant_limit_event {
u8 reserved[3];
} __packed;
+enum wmi_bf_type {
+ WMI_BF_TYPE_SLS = 0x00,
+ WMI_BF_TYPE_BRP_RX = 0x01,
+};
+
+/* WMI_BF_TRIG_CMDID */
+struct wmi_bf_trig_cmd {
+ /* enum wmi_bf_type - type of requested beamforming */
+ u8 bf_type;
+ /* used only for WMI_BF_TYPE_BRP_RX */
+ u8 cid;
+ /* used only for WMI_BF_TYPE_SLS */
+ u8 dst_mac[WMI_MAC_LEN];
+ u8 reserved[4];
+} __packed;
+
+/* WMI_BF_TRIG_EVENTID */
+struct wmi_bf_trig_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 cid;
+ u8 reserved[2];
+} __packed;
+
/* broadcast connection ID */
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
@@ -3263,6 +3309,8 @@ struct wmi_link_maintain_cfg_read_done_event {
enum wmi_traffic_suspend_status {
WMI_TRAFFIC_SUSPEND_APPROVED = 0x0,
WMI_TRAFFIC_SUSPEND_REJECTED_LINK_NOT_IDLE = 0x1,
+ WMI_TRAFFIC_SUSPEND_REJECTED_DISCONNECT = 0x2,
+ WMI_TRAFFIC_SUSPEND_REJECTED_OTHER = 0x3,
};
/* WMI_TRAFFIC_SUSPEND_EVENTID */
@@ -3282,6 +3330,7 @@ enum wmi_resume_trigger {
WMI_RESUME_TRIGGER_UCAST_RX = 0x2,
WMI_RESUME_TRIGGER_BCAST_RX = 0x4,
WMI_RESUME_TRIGGER_WMI_EVT = 0x8,
+ WMI_RESUME_TRIGGER_DISCONNECT = 0x10,
};
/* WMI_TRAFFIC_RESUME_EVENTID */
@@ -4057,4 +4106,38 @@ struct wmi_set_vring_priority_event {
u8 reserved[3];
} __packed;
+/* WMI_RADAR_PCI_CTRL_BLOCK struct */
+struct wmi_radar_pci_ctrl_block {
+ /* last fw tail address index */
+ __le32 fw_tail_index;
+ /* last SW head address index known to FW */
+ __le32 sw_head_index;
+ __le32 last_wr_pulse_tsf_low;
+ __le32 last_wr_pulse_count;
+ __le32 last_wr_in_bytes;
+ __le32 last_wr_pulse_id;
+ __le32 last_wr_burst_id;
+	/* When pre-overflow is detected, advance sw head in units of pulses */
+ __le32 sw_head_inc;
+ __le32 reserved[8];
+} __packed;
+
+/* WMI_RBUFCAP_CFG_CMD */
+struct wmi_rbufcap_cfg_cmd {
+ u8 enable;
+ u8 reserved;
+ /* RBUFCAP indicates rx space unavailable when number of rx
+ * descriptors drops below this threshold. Set 0 to use system
+ * default
+ */
+ __le16 rx_desc_threshold;
+} __packed;
+
+/* WMI_RBUFCAP_CFG_EVENTID */
+struct wmi_rbufcap_cfg_event {
+ /* enum wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index e99e766a3028..1cabae424839 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
if (result < 0)
printk(KERN_ERR DRIVER_NAME
": usb_register failed (status %d)\n", result);
-
- led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
+ else
+ led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
return result;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 46408a560814..6b7f0238723f 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -1826,16 +1826,10 @@ static void lpphy_stop_tx_tone(struct b43_wldev *dev)
}
-static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
- int mode, bool useindex, u8 index)
-{
- //TODO
-}
-
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
- struct lpphy_tx_gains gains, oldgains;
+ struct lpphy_tx_gains oldgains;
int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
lpphy_read_tx_pctl_mode_from_hardware(dev);
@@ -1848,11 +1842,6 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
- if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
- lpphy_papd_cal(dev, gains, 0, 1, 30);
- else
- lpphy_papd_cal(dev, gains, 0, 1, 65);
-
if (old_afe_ovr)
lpphy_set_tx_gains(dev, oldgains);
lpphy_set_bb_mult(dev, old_bbmult);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 73d3c1a0a7c9..98b168736df0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -490,11 +490,18 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr)
+void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr)
+{
+ struct brcmf_bcdc *bcdc = drvr->proto->pd;
+
+ brcmf_fws_detach_pre_delif(bcdc->fws);
+}
+
+void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc = drvr->proto->pd;
drvr->proto->pd = NULL;
- brcmf_fws_detach(bcdc->fws);
+ brcmf_fws_detach_post_delif(bcdc->fws);
kfree(bcdc);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
index 3b0e9eff21b5..4bc52240ccea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.h
@@ -18,14 +18,16 @@
#ifdef CONFIG_BRCMFMAC_PROTO_BCDC
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr);
-void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr);
+void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr);
void brcmf_proto_bcdc_txflowblock(struct device *dev, bool state);
void brcmf_proto_bcdc_txcomplete(struct device *dev, struct sk_buff *txp,
bool success);
struct brcmf_fws_info *drvr_to_fws(struct brcmf_pub *drvr);
#else
static inline int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr) { return 0; }
-static inline void brcmf_proto_bcdc_detach(struct brcmf_pub *drvr) {}
+static inline void brcmf_proto_bcdc_detach_pre_delif(struct brcmf_pub *drvr) {}
+static inline void brcmf_proto_bcdc_detach_post_delif(struct brcmf_pub *drvr) {}
#endif
#endif /* BRCMFMAC_BCDC_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ec129864cc9c..60aede5abb4d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -628,15 +628,13 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
if (err)
- return err;
+ goto out;
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- if (!err)
- err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
- mypkt);
-
+ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
+out:
brcmu_pkt_buf_free_skb(mypkt);
return err;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 3d441c5c745c..2fe167eae22c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -91,6 +91,7 @@ struct brcmf_bus_ops {
int (*get_fwname)(struct device *dev, const char *ext,
unsigned char *fw_name);
void (*debugfs_create)(struct device *dev);
+ int (*reset)(struct device *dev);
};
@@ -245,6 +246,15 @@ void brcmf_bus_debugfs_create(struct brcmf_bus *bus)
return bus->ops->debugfs_create(bus->dev);
}
+static inline
+int brcmf_bus_reset(struct brcmf_bus *bus)
+{
+ if (!bus->ops->reset)
+ return -EOPNOTSUPP;
+
+ return bus->ops->reset(bus->dev);
+}
+
/*
* interface functions from common layer
*/
@@ -262,6 +272,8 @@ void brcmf_detach(struct device *dev);
void brcmf_dev_reset(struct device *dev);
/* Request from bus module to initiate a coredump */
void brcmf_dev_coredump(struct device *dev);
+/* Indication that firmware has halted or crashed */
+void brcmf_fw_crashed(struct device *dev);
/* Configure the "global" bus state used by upper layers */
void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index e92f6351bd22..8ee8af4e7ec4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5464,6 +5464,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
conn_info->req_ie =
kmemdup(cfg->extra_buf, conn_info->req_ie_len,
GFP_KERNEL);
+ if (!conn_info->req_ie)
+ conn_info->req_ie_len = 0;
} else {
conn_info->req_ie_len = 0;
conn_info->req_ie = NULL;
@@ -5480,6 +5482,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
conn_info->resp_ie =
kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
GFP_KERNEL);
+ if (!conn_info->resp_ie)
+ conn_info->resp_ie_len = 0;
} else {
conn_info->resp_ie_len = 0;
conn_info->resp_ie = NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 4fbe8791f674..7d6a08779693 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -841,17 +841,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
bool rtnl_locked)
{
struct brcmf_if *ifp;
+ int ifidx;
ifp = drvr->iflist[bsscfgidx];
- drvr->iflist[bsscfgidx] = NULL;
if (!ifp) {
bphy_err(drvr, "Null interface, bsscfgidx=%d\n", bsscfgidx);
return;
}
brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
ifp->ifidx);
- if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
- drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
+ ifidx = ifp->ifidx;
+
if (ifp->ndev) {
if (bsscfgidx == 0) {
if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
@@ -879,6 +879,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
brcmf_p2p_ifp_removed(ifp, rtnl_locked);
kfree(ifp);
}
+
+ drvr->iflist[bsscfgidx] = NULL;
+ if (drvr->if2bss[ifidx] == bsscfgidx)
+ drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
}
void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
@@ -1084,6 +1088,14 @@ static int brcmf_revinfo_read(struct seq_file *s, void *data)
return 0;
}
+static void brcmf_core_bus_reset(struct work_struct *work)
+{
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
+ bus_reset);
+
+ brcmf_bus_reset(drvr->bus_if);
+}
+
static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
{
int ret = -1;
@@ -1155,6 +1167,8 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
#endif
#endif /* CONFIG_INET */
+ INIT_WORK(&drvr->bus_reset, brcmf_core_bus_reset);
+
/* populate debugfs */
brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
brcmf_feat_debugfs_create(drvr);
@@ -1273,6 +1287,18 @@ void brcmf_dev_coredump(struct device *dev)
brcmf_dbg(TRACE, "failed to create coredump\n");
}
+void brcmf_fw_crashed(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
+
+ bphy_err(drvr, "Firmware has halted or crashed\n");
+
+ brcmf_dev_coredump(dev);
+
+ schedule_work(&drvr->bus_reset);
+}
+
void brcmf_detach(struct device *dev)
{
s32 i;
@@ -1299,6 +1325,8 @@ void brcmf_detach(struct device *dev)
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+ brcmf_proto_detach_pre_delif(drvr);
+
/* make sure primary interface removed last */
for (i = BRCMF_MAX_IFS-1; i > -1; i--)
brcmf_remove_interface(drvr->iflist[i], false);
@@ -1308,7 +1336,7 @@ void brcmf_detach(struct device *dev)
brcmf_bus_stop(drvr->bus_if);
- brcmf_proto_detach(drvr);
+ brcmf_proto_detach_post_delif(drvr);
bus_if->drvr = NULL;
wiphy_free(drvr->wiphy);
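core.c now routes firmware-halt reports through brcmf_fw_crashed(), which logs, takes a coredump, and schedules the bus_reset work initialised in brcmf_bus_started(); the work handler then calls brcmf_bus_reset(). A distilled sketch of that defer-to-workqueue shape, using the regular kernel workqueue API and illustrative "demo_*" names:

#include <linux/workqueue.h>

struct demo_pub {
	struct work_struct bus_reset;
};

static void demo_bus_reset_worker(struct work_struct *work)
{
	struct demo_pub *pub = container_of(work, struct demo_pub, bus_reset);

	/* the real handler calls brcmf_bus_reset(drvr->bus_if) here */
	(void)pub;
}

static void demo_attach(struct demo_pub *pub)
{
	INIT_WORK(&pub->bus_reset, demo_bus_reset_worker);
}

static void demo_fw_crashed(struct demo_pub *pub)
{
	/* coredump first in the real code, then hand the reset to a worker */
	schedule_work(&pub->bus_reset);
}

Deferring to a work item presumably keeps the heavyweight re-probe out of the bus mailbox/interrupt handling paths that report the crash.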
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index d8085ce579f4..9f09aa31eeda 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -143,6 +143,8 @@ struct brcmf_pub {
struct notifier_block inet6addr_notifier;
struct brcmf_mp_device *settings;
+ struct work_struct bus_reset;
+
u8 clmver[BRCMF_DCMD_SMLEN];
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 7535cb0d4ac0..9f1417e00073 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -31,6 +31,10 @@ struct brcmf_dmi_data {
/* NOTE: Please keep all entries sorted alphabetically */
+static const struct brcmf_dmi_data acepc_t8_data = {
+ BRCM_CC_4345_CHIP_ID, 6, "acepc-t8"
+};
+
static const struct brcmf_dmi_data gpd_win_pocket_data = {
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
};
@@ -49,6 +53,28 @@ static const struct brcmf_dmi_data pov_tab_p1006w_data = {
static const struct dmi_system_id dmi_platform_data[] = {
{
+ /* ACEPC T8 Cherry Trail Z8350 mini PC */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T8"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
+ /* ACEPC T11 Cherry Trail Z8350 mini PC, same wifi as the T8 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "T11"),
+ /* also match on somewhat unique bios-version */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1.000"),
+ },
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Match for the GPDwin which unfortunately uses somewhat
* generic dmi strings, which is why we test for 4 strings.
* Comparing against 23 other byt/cht boards, board_vendor
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 8209a42dea72..6a333dd80b2d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -711,7 +711,6 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
size_t mp_path_len;
u32 i, j;
char end = '\0';
- size_t reqsz;
for (i = 0; i < table_size; i++) {
if (mapping_table[i].chipid == chip &&
@@ -726,8 +725,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
return NULL;
}
- reqsz = sizeof(*fwreq) + n_fwnames * sizeof(struct brcmf_fw_item);
- fwreq = kzalloc(reqsz, GFP_KERNEL);
+ fwreq = kzalloc(struct_size(fwreq, items, n_fwnames), GFP_KERNEL);
if (!fwreq)
return NULL;
@@ -743,6 +741,7 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
for (j = 0; j < n_fwnames; j++) {
fwreq->items[j].path = fwnames[j].path;
+ fwnames[j].path[0] = '\0';
/* check if firmware path is provided by module parameter */
if (brcmf_mp_global.firmware_path[0] != '\0') {
strlcpy(fwnames[j].path, mp_path,
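The firmware.c hunk swaps the open-coded sizeof(*fwreq) + n_fwnames * sizeof(struct brcmf_fw_item) for struct_size(), the helper from <linux/overflow.h> that sizes a structure with a trailing flexible array and saturates to SIZE_MAX on overflow, so the allocation fails instead of coming back undersized. A small sketch of the pattern with an illustrative struct:

#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_req {
	unsigned int n_items;
	const char *items[];	/* flexible array member */
};

static struct demo_req *demo_req_alloc(unsigned int n_items)
{
	struct demo_req *req;

	/* struct_size(req, items, n_items) ==
	 * sizeof(*req) + n_items * sizeof(req->items[0]), overflow-checked */
	req = kzalloc(struct_size(req, items, n_items), GFP_KERNEL);
	if (req)
		req->n_items = n_items;

	return req;
}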
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index abeb305492e0..c22c49ae552e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -580,24 +580,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
return ifidx == *(int *)arg;
}
-static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
- int ifidx)
-{
- bool (*matchfn)(struct sk_buff *, void *) = NULL;
- struct sk_buff *skb;
- int prec;
-
- if (ifidx != -1)
- matchfn = brcmf_fws_ifidx_match;
- for (prec = 0; prec < q->num_prec; prec++) {
- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
- while (skb) {
- brcmu_pkt_buf_free_skb(skb);
- skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
- }
- }
-}
-
static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
{
int i;
@@ -669,6 +651,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
return 0;
}
+static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
+ int ifidx)
+{
+ bool (*matchfn)(struct sk_buff *, void *) = NULL;
+ struct sk_buff *skb;
+ int prec;
+ u32 hslot;
+
+ if (ifidx != -1)
+ matchfn = brcmf_fws_ifidx_match;
+ for (prec = 0; prec < q->num_prec; prec++) {
+ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+ while (skb) {
+ hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
+ true);
+ brcmu_pkt_buf_free_skb(skb);
+ skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
+ }
+ }
+}
+
static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
u32 slot_id)
{
@@ -2200,6 +2204,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
brcmf_fws_lock(fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
+ brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
+ ifp->ifidx);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(fws, ifp->ifidx);
brcmf_fws_unlock(fws);
@@ -2437,17 +2443,25 @@ struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr)
return fws;
fail:
- brcmf_fws_detach(fws);
+ brcmf_fws_detach_pre_delif(fws);
+ brcmf_fws_detach_post_delif(fws);
return ERR_PTR(rc);
}
-void brcmf_fws_detach(struct brcmf_fws_info *fws)
+void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws)
{
if (!fws)
return;
-
- if (fws->fws_wq)
+ if (fws->fws_wq) {
destroy_workqueue(fws->fws_wq);
+ fws->fws_wq = NULL;
+ }
+}
+
+void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws)
+{
+ if (!fws)
+ return;
/* cleanup */
brcmf_fws_lock(fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index 4e6835766d5d..749c06dcdc17 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -19,7 +19,8 @@
#define FWSIGNAL_H_
struct brcmf_fws_info *brcmf_fws_attach(struct brcmf_pub *drvr);
-void brcmf_fws_detach(struct brcmf_fws_info *fws);
+void brcmf_fws_detach_pre_delif(struct brcmf_fws_info *fws);
+void brcmf_fws_detach_post_delif(struct brcmf_fws_info *fws);
void brcmf_fws_debugfs_create(struct brcmf_pub *drvr);
bool brcmf_fws_queue_skbs(struct brcmf_fws_info *fws);
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index d3780eae7f19..9d1f9ff25bfa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -375,7 +375,7 @@ brcmf_msgbuf_get_pktid(struct device *dev, struct brcmf_msgbuf_pktids *pktids,
struct brcmf_msgbuf_pktid *pktid;
struct sk_buff *skb;
- if (idx >= pktids->array_size) {
+ if (idx < 0 || idx >= pktids->array_size) {
brcmf_err("Invalid packet id %d (max %d)\n", idx,
pktids->array_size);
return NULL;
@@ -747,7 +747,7 @@ static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid)
tx_msghdr = (struct msgbuf_tx_msghdr *)ret_ptr;
tx_msghdr->msg.msgtype = MSGBUF_TYPE_TX_POST;
- tx_msghdr->msg.request_id = cpu_to_le32(pktid);
+ tx_msghdr->msg.request_id = cpu_to_le32(pktid + 1);
tx_msghdr->msg.ifidx = brcmf_flowring_ifidx_get(flow, flowid);
tx_msghdr->flags = BRCMF_MSGBUF_PKT_FLAGS_FRAME_802_3;
tx_msghdr->flags |= (skb->priority & 0x07) <<
@@ -884,7 +884,7 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
u16 flowid;
tx_status = (struct msgbuf_tx_status *)buf;
- idx = le32_to_cpu(tx_status->msg.request_id);
+ idx = le32_to_cpu(tx_status->msg.request_id) - 1;
flowid = le16_to_cpu(tx_status->compl_hdr.flow_ring_id);
flowid -= BRCMF_H2D_MSGRING_FLOWRING_IDSTART;
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
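The msgbuf.c change biases the request id carried in the TX descriptor: the host publishes pktid + 1 and subtracts 1 again when the TX-status completion comes back, and brcmf_msgbuf_get_pktid() additionally rejects out-of-range indices. The net effect, as far as these hunks show, is that a request_id of 0 coming back from firmware can no longer be mistaken for packet slot 0. The arithmetic, in plain C with illustrative names:

#include <stdint.h>

static uint32_t demo_encode_request_id(uint32_t pktid)
{
	return pktid + 1;		/* 0 is reserved as "never valid" */
}

static int demo_decode_request_id(uint32_t request_id, uint32_t array_size,
				  uint32_t *pktid)
{
	if (request_id == 0 || request_id > array_size)
		return -1;		/* out of range: drop the completion */

	*pktid = request_id - 1;
	return 0;
}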
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index 58a6bc379358..83e4938527f4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -345,6 +345,10 @@ static const u32 brcmf_ring_itemsize[BRCMF_NROF_COMMON_MSGRINGS] = {
BRCMF_D2H_MSGRING_RX_COMPLETE_ITEMSIZE
};
+static void brcmf_pcie_setup(struct device *dev, int ret,
+ struct brcmf_fw_request *fwreq);
+static struct brcmf_fw_request *
+brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo);
static u32
brcmf_pcie_read_reg32(struct brcmf_pciedev_info *devinfo, u32 reg_offset)
@@ -671,6 +675,7 @@ static int
brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
{
struct brcmf_pcie_shared_info *shared;
+ struct brcmf_core *core;
u32 addr;
u32 cur_htod_mb_data;
u32 i;
@@ -694,7 +699,11 @@ brcmf_pcie_send_mb_data(struct brcmf_pciedev_info *devinfo, u32 htod_mb_data)
brcmf_pcie_write_tcm32(devinfo, addr, htod_mb_data);
pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
- pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
+
+ /* Send mailbox interrupt twice as a hardware workaround */
+ core = brcmf_chip_get_core(devinfo->ci, BCMA_CORE_PCIE2);
+ if (core->rev <= 13)
+ pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_SBMBX, 1);
return 0;
}
@@ -730,7 +739,7 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo)
}
if (dtoh_mb_data & BRCMF_D2H_DEV_FWHALT) {
brcmf_dbg(PCIE, "D2H_MB_DATA: FW HALT\n");
- brcmf_dev_coredump(&devinfo->pdev->dev);
+ brcmf_fw_crashed(&devinfo->pdev->dev);
}
}
@@ -755,15 +764,22 @@ static void brcmf_pcie_bus_console_init(struct brcmf_pciedev_info *devinfo)
console->base_addr, console->buf_addr, console->bufsize);
}
-
-static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
+/**
+ * brcmf_pcie_bus_console_read - reads firmware messages
+ *
+ * @devinfo: pointer to the PCIe device info structure
+ * @error: specifies if an error has occurred (prints messages unconditionally)
+ */
+static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo,
+ bool error)
{
+ struct pci_dev *pdev = devinfo->pdev;
+ struct brcmf_bus *bus = dev_get_drvdata(&pdev->dev);
struct brcmf_pcie_console *console;
u32 addr;
u8 ch;
u32 newidx;
- if (!BRCMF_FWCON_ON())
+ if (!error && !BRCMF_FWCON_ON())
return;
console = &devinfo->shared.console;
@@ -787,7 +803,10 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo)
}
if (ch == '\n') {
console->log_str[console->log_idx] = 0;
- pr_debug("CONSOLE: %s", console->log_str);
+ if (error)
+ brcmf_err(bus, "CONSOLE: %s", console->log_str);
+ else
+ pr_debug("CONSOLE: %s", console->log_str);
console->log_idx = 0;
}
}
@@ -848,7 +867,7 @@ static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg)
&devinfo->pdev->dev);
}
}
- brcmf_pcie_bus_console_read(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
if (devinfo->state == BRCMFMAC_PCIE_STATE_UP)
brcmf_pcie_intr_enable(devinfo);
devinfo->in_irq = false;
@@ -1409,6 +1428,38 @@ int brcmf_pcie_get_fwname(struct device *dev, const char *ext, u8 *fw_name)
return 0;
}
+static int brcmf_pcie_reset(struct device *dev)
+{
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pciedev *buspub = bus_if->bus_priv.pcie;
+ struct brcmf_pciedev_info *devinfo = buspub->devinfo;
+ struct brcmf_fw_request *fwreq;
+ int err;
+
+ brcmf_pcie_bus_console_read(devinfo, true);
+
+ brcmf_detach(dev);
+
+ brcmf_pcie_release_irq(devinfo);
+ brcmf_pcie_release_scratchbuffers(devinfo);
+ brcmf_pcie_release_ringbuffers(devinfo);
+ brcmf_pcie_reset_device(devinfo);
+
+ fwreq = brcmf_pcie_prepare_fw_request(devinfo);
+ if (!fwreq) {
+ dev_err(dev, "Failed to prepare FW request\n");
+ return -ENOMEM;
+ }
+
+ err = brcmf_fw_get_firmwares(dev, fwreq, brcmf_pcie_setup);
+ if (err) {
+ dev_err(dev, "Failed to get firmware\n");
+ kfree(fwreq);
+ }
+
+ return err;
+}
+
static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.txdata = brcmf_pcie_tx,
.stop = brcmf_pcie_down,
@@ -1418,6 +1469,7 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = {
.get_ramsize = brcmf_pcie_get_ramsize,
.get_memdump = brcmf_pcie_get_memdump,
.get_fwname = brcmf_pcie_get_fwname,
+ .reset = brcmf_pcie_reset,
};
@@ -1778,7 +1830,7 @@ static void brcmf_pcie_setup(struct device *dev, int ret,
if (brcmf_attach(&devinfo->pdev->dev, devinfo->settings) == 0)
return;
- brcmf_pcie_bus_console_read(devinfo);
+ brcmf_pcie_bus_console_read(devinfo, false);
fail:
device_release_driver(dev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
index 024c643052bc..c7964ccdda69 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.c
@@ -67,16 +67,22 @@ fail:
return -ENOMEM;
}
-void brcmf_proto_detach(struct brcmf_pub *drvr)
+void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr)
{
brcmf_dbg(TRACE, "Enter\n");
if (drvr->proto) {
if (drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
- brcmf_proto_bcdc_detach(drvr);
+ brcmf_proto_bcdc_detach_post_delif(drvr);
else if (drvr->bus_if->proto_type == BRCMF_PROTO_MSGBUF)
brcmf_proto_msgbuf_detach(drvr);
kfree(drvr->proto);
drvr->proto = NULL;
}
}
+
+void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr)
+{
+ if (drvr->proto && drvr->bus_if->proto_type == BRCMF_PROTO_BCDC)
+ brcmf_proto_bcdc_detach_pre_delif(drvr);
+}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index d3c3b9a815ad..72355aea9028 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -54,7 +54,8 @@ struct brcmf_proto {
int brcmf_proto_attach(struct brcmf_pub *drvr);
-void brcmf_proto_detach(struct brcmf_pub *drvr);
+void brcmf_proto_detach_pre_delif(struct brcmf_pub *drvr);
+void brcmf_proto_detach_post_delif(struct brcmf_pub *drvr);
static inline int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws,
struct sk_buff *skb,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 4d104ab80fd8..22b73da42822 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -622,6 +622,7 @@ BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio");
BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
+BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
@@ -642,7 +643,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0x00000001, 43430A0),
BRCMF_FW_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFE, 43430A1),
- BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
+ BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0x00000200, 43456),
+ BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFDC0, 43455),
BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373),
@@ -1090,8 +1092,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
/* dongle indicates the firmware has halted/crashed */
if (hmb_data & HMB_DATA_FWHALT) {
- brcmf_err("mailbox indicates firmware halted\n");
- brcmf_dev_coredump(&sdiod->func1->dev);
+ brcmf_dbg(SDIO, "mailbox indicates firmware halted\n");
+ brcmf_fw_crashed(&sdiod->func1->dev);
}
/* Dongle recomposed rx frames, accept them again */
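The new BRCMF_FW_ENTRY line teaches the SDIO firmware table to pick a dedicated brcmfmac43456-sdio image: the second column is a chip-revision bitmask in which bit N selects revision N, so 0x00000200 claims revision 9 of the 4345 chip while the 43455 mask shrinks from 0xFFFFFFC0 to 0xFFFFFDC0 (the same mask with bit 9 cleared). A plain-C illustration of the mask arithmetic, runnable outside the kernel:

#include <stdint.h>
#include <stdio.h>

static int revmask_matches(uint32_t revmask, unsigned int chiprev)
{
	return chiprev < 32 && (revmask & (1u << chiprev));
}

int main(void)
{
	printf("rev 9 uses 43456: %d\n", revmask_matches(0x00000200, 9)); /* 1 */
	printf("rev 9 uses 43455: %d\n", revmask_matches(0xFFFFFDC0, 9)); /* 0 */
	printf("rev 6 uses 43455: %d\n", revmask_matches(0xFFFFFDC0, 6)); /* 1 */
	return 0;
}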
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index e9cbfd077710..75fcd6752edc 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
struct usb_device *usbdev;
struct device *dev;
- struct mutex dev_init_lock;
+ struct completion dev_init_done;
int ctl_in_pipe, ctl_out_pipe;
struct urb *ctl_urb; /* URB for control endpoint */
@@ -445,22 +445,17 @@ fail:
}
-static void brcmf_usb_free_q(struct list_head *q, bool pending)
+static void brcmf_usb_free_q(struct list_head *q)
{
struct brcmf_usbreq *req, *next;
- int i = 0;
+
list_for_each_entry_safe(req, next, q, list) {
if (!req->urb) {
brcmf_err("bad req\n");
break;
}
- i++;
- if (pending) {
- usb_kill_urb(req->urb);
- } else {
- usb_free_urb(req->urb);
- list_del_init(&req->list);
- }
+ usb_free_urb(req->urb);
+ list_del_init(&req->list);
}
}
@@ -682,12 +677,18 @@ static int brcmf_usb_up(struct device *dev)
static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
{
+ int i;
+
if (devinfo->ctl_urb)
usb_kill_urb(devinfo->ctl_urb);
if (devinfo->bulk_urb)
usb_kill_urb(devinfo->bulk_urb);
- brcmf_usb_free_q(&devinfo->tx_postq, true);
- brcmf_usb_free_q(&devinfo->rx_postq, true);
+ if (devinfo->tx_reqs)
+ for (i = 0; i < devinfo->bus_pub.ntxq; i++)
+ usb_kill_urb(devinfo->tx_reqs[i].urb);
+ if (devinfo->rx_reqs)
+ for (i = 0; i < devinfo->bus_pub.nrxq; i++)
+ usb_kill_urb(devinfo->rx_reqs[i].urb);
}
static void brcmf_usb_down(struct device *dev)
@@ -1023,8 +1024,8 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo)
brcmf_dbg(USB, "Enter, devinfo %p\n", devinfo);
/* free the URBS */
- brcmf_usb_free_q(&devinfo->rx_freeq, false);
- brcmf_usb_free_q(&devinfo->tx_freeq, false);
+ brcmf_usb_free_q(&devinfo->rx_freeq);
+ brcmf_usb_free_q(&devinfo->tx_freeq);
usb_free_urb(devinfo->ctl_urb);
usb_free_urb(devinfo->bulk_urb);
@@ -1193,11 +1194,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
if (ret)
goto error;
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
return;
error:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
device_release_driver(dev);
}
@@ -1265,7 +1266,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
if (ret)
goto fail;
/* we are done */
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
return 0;
}
bus->chip = bus_pub->devid;
@@ -1325,11 +1326,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->usbdev = usb;
devinfo->dev = &usb->dev;
- /* Take an init lock, to protect for disconnect while still loading.
+ /* Init completion, to protect against disconnect while still loading.
* Necessary because of the asynchronous firmware load construction
*/
- mutex_init(&devinfo->dev_init_lock);
- mutex_lock(&devinfo->dev_init_lock);
+ init_completion(&devinfo->dev_init_done);
usb_set_intfdata(intf, devinfo);
@@ -1407,7 +1407,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
return 0;
fail:
- mutex_unlock(&devinfo->dev_init_lock);
+ complete(&devinfo->dev_init_done);
kfree(devinfo);
usb_set_intfdata(intf, NULL);
return ret;
@@ -1422,7 +1422,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
if (devinfo) {
- mutex_lock(&devinfo->dev_init_lock);
+ wait_for_completion(&devinfo->dev_init_done);
/* Make sure that devinfo still exists. Firmware probe routines
* may have released the device and cleared the intfdata.
*/
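The usb.c rework replaces the dev_init_lock mutex with a completion: probe initialises it, the asynchronous firmware-load paths signal it on both success and failure, and disconnect waits on it before tearing anything down. Unlike a mutex, a completion does not have to be released by the same task that "took" it, which is exactly what this asynchronous probe flow needs. A distilled sketch with illustrative "demo_*" names:

#include <linux/completion.h>

struct demo_devinfo {
	struct completion dev_init_done;
};

static void demo_probe(struct demo_devinfo *d)
{
	init_completion(&d->dev_init_done);
	/* ... kick off the asynchronous firmware request here ... */
}

static void demo_probe_phase2(struct demo_devinfo *d)
{
	/* runs from the firmware-load callback, on success or failure */
	complete(&d->dev_init_done);
}

static void demo_disconnect(struct demo_devinfo *d)
{
	/* blocks until the asynchronous probe path has signalled */
	wait_for_completion(&d->dev_init_done);
	/* ... now safe to release the device ... */
}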
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
index 8eff2753abad..d493021f6031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
@@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
struct brcmf_if *ifp;
const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
struct sk_buff *reply;
- int ret, payload, ret_len;
+ unsigned int payload, ret_len;
void *dcmd_buf = NULL, *wr_pointer;
u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
+ int ret;
if (len < sizeof(*cmdhdr)) {
brcmf_err("vendor command too short: %d\n", len);
@@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
brcmf_err("oversize return buffer %d\n", ret_len);
ret_len = BRCMF_DCMD_MAXLEN;
}
- payload = max(ret_len, len) + 1;
+ payload = max_t(unsigned int, ret_len, len) + 1;
dcmd_buf = vzalloc(payload);
if (NULL == dcmd_buf)
return -ENOMEM;
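The vendor.c tweak turns payload and ret_len into unsigned int and switches max() to max_t(): the kernel's max() macro complains at compile time when its arguments have different types, so comparing the now-unsigned ret_len with the signed len parameter needs an explicit common type. A one-function sketch of the difference (kernel headers, illustrative values):

#include <linux/kernel.h>

static unsigned int demo_payload_size(unsigned int ret_len, int len)
{
	/* max(ret_len, len) would trip the compile-time type check;
	 * max_t() casts both operands to the named type first. */
	return max_t(unsigned int, ret_len, len) + 1;
}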
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
index a2960032be81..4b912e707f38 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -185,7 +185,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - CCK:");
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -273,7 +273,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Rx - GENERAL:");
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -346,7 +346,7 @@ il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_Tx:");
pos +=
scnprintf(buf + pos, bufsz - pos,
@@ -447,7 +447,7 @@ il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
pos +=
scnprintf(buf + pos, bufsz - pos,
"%-32s current"
- "acumulative delta max\n",
+ "accumulative delta max\n",
"Statistics_General:");
pos +=
scnprintf(buf + pos, bufsz - pos,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index ce4144a89217..a20b6c885047 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -577,7 +577,6 @@ il4965_math_div_round(s32 num, s32 denom, s32 * res)
sign = -sign;
denom = -denom;
}
- *res = 1;
*res = ((num * 2 + denom) / (denom * 2)) * sign;
return 1;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index 0a87d87fbb4f..a9c846c59289 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -56,7 +56,7 @@
#include "iwl-config.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 46
+#define IWL_22000_UCODE_API_MAX 48
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -80,7 +80,6 @@
#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
-#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
@@ -89,6 +88,7 @@
#define IWL_22000_SO_A_HR_B_FW_PRE "iwlwifi-so-a0-hr-b0-"
#define IWL_22000_SO_A_GF_A_FW_PRE "iwlwifi-so-a0-gf-a0-"
#define IWL_22000_TY_A_GF_A_FW_PRE "iwlwifi-ty-a0-gf-a0-"
+#define IWL_22000_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -104,8 +104,6 @@
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
-#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
- IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
@@ -180,7 +178,11 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000, \
.d3_debug_data_base_addr = 0x401000, \
- .d3_debug_data_length = 60 * 1024
+ .d3_debug_data_length = 60 * 1024, \
+ .fw_mon_smem_write_ptr_addr = 0xa0c16c, \
+ .fw_mon_smem_write_ptr_msk = 0xfffff, \
+ .fw_mon_smem_cycle_cnt_ptr_addr = 0xa0c174, \
+ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
#define IWL_DEVICE_AX200_COMMON \
IWL_DEVICE_22000_COMMON, \
@@ -190,7 +192,8 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22000, \
.base_params = &iwl_22000_base_params, \
- .csr = &iwl_csr_v1
+ .csr = &iwl_csr_v1, \
+ .gp2_reg_addr = 0xa02c68
#define IWL_DEVICE_22560 \
IWL_DEVICE_22000_COMMON, \
@@ -203,7 +206,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.device_family = IWL_DEVICE_FAMILY_AX210, \
.base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v1, \
- .min_txq_size = 128
+ .min_txq_size = 128, \
+ .gp2_reg_addr = 0xd02c68, \
+ .min_256_ba_txq_size = 512
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
@@ -412,19 +417,6 @@ const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
- .name = "Intel(R) Dual Band Wireless AX 22560",
- .fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
- IWL_DEVICE_22560,
- .cdb = true,
- /*
- * This device doesn't support receiving BlockAck with a large bitmap
- * so we need to restrict the size of transmitted aggregation to the
- * HT size; mac80211 would otherwise pick the HE max (256) by default.
- */
- .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
-};
-
const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0 = {
.name = "Intel(R) Wireless-AC 9560 160MHz",
.fw_name_pre = IWL_22000_SO_A_JF_B_FW_PRE,
@@ -440,12 +432,20 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX211 160MHz",
.fw_name_pre = IWL_22000_SO_A_GF_A_FW_PRE,
+ .uhb_supported = true,
IWL_DEVICE_AX210,
};
const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
.name = "Intel(R) Wi-Fi 7 AX210 160MHz",
.fw_name_pre = IWL_22000_TY_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+};
+
+const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0 = {
+ .name = "Intel(R) Wi-Fi 7 AX210 160MHz",
+ .fw_name_pre = IWL_22000_SO_A_GF4_A_FW_PRE,
IWL_DEVICE_AX210,
};
@@ -455,7 +455,6 @@ MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
-MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index 3225b64eb845..41bdd0eaf62c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -148,7 +148,11 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.d3_debug_data_length = 92 * 1024, \
.ht_params = &iwl9000_ht_params, \
.nvm_ver = IWL9000_NVM_VERSION, \
- .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
+ .fw_mon_smem_write_ptr_addr = 0xa0476c, \
+ .fw_mon_smem_write_ptr_msk = 0xfffff, \
+ .fw_mon_smem_cycle_cnt_ptr_addr = 0xa04774, \
+ .fw_mon_smem_cycle_cnt_ptr_msk = 0xfffff
const struct iwl_cfg iwl9160_2ac_cfg = {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 86ea0784e1a3..31231b223aae 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,7 +214,7 @@ struct iwl_proto_offload_cmd_v3_large {
#define IWL_WOWLAN_MIN_PATTERN_LEN 16
#define IWL_WOWLAN_MAX_PATTERN_LEN 128
-struct iwl_wowlan_pattern {
+struct iwl_wowlan_pattern_v1 {
u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
u8 mask_size;
@@ -227,7 +227,7 @@ struct iwl_wowlan_pattern {
/**
* struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
*/
-struct iwl_wowlan_patterns_cmd {
+struct iwl_wowlan_patterns_cmd_v1 {
/**
* @n_patterns: number of patterns
*/
@@ -236,9 +236,129 @@ struct iwl_wowlan_patterns_cmd {
/**
* @patterns: the patterns, array length in @n_patterns
*/
- struct iwl_wowlan_pattern patterns[];
+ struct iwl_wowlan_pattern_v1 patterns[];
} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
+#define IPV4_ADDR_SIZE 4
+#define IPV6_ADDR_SIZE 16
+
+enum iwl_wowlan_pattern_type {
+ WOWLAN_PATTERN_TYPE_BITMASK,
+ WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN,
+ WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN,
+ WOWLAN_PATTERN_TYPE_IPV4_TCP_SYN_WILDCARD,
+ WOWLAN_PATTERN_TYPE_IPV6_TCP_SYN_WILDCARD,
+}; /* WOWLAN_PATTERN_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_wowlan_ipv4_tcp_syn - WoWLAN IPv4 TCP SYN pattern data
+ */
+struct iwl_wowlan_ipv4_tcp_syn {
+ /**
+ * @src_addr: source IP address to match
+ */
+ u8 src_addr[IPV4_ADDR_SIZE];
+
+ /**
+ * @dst_addr: destination IP address to match
+ */
+ u8 dst_addr[IPV4_ADDR_SIZE];
+
+ /**
+ * @src_port: source TCP port to match
+ */
+ __le16 src_port;
+
+ /**
+ * @dst_port: destination TCP port to match
+ */
+ __le16 dst_port;
+} __packed; /* WOWLAN_IPV4_TCP_SYN_API_S_VER_1 */
+
+/**
+ * struct iwl_wowlan_ipv6_tcp_syn - WoWLAN Ipv6 TCP SYN pattern data
+ */
+struct iwl_wowlan_ipv6_tcp_syn {
+ /**
+ * @src_addr: source IP address to match
+ */
+ u8 src_addr[IPV6_ADDR_SIZE];
+
+ /**
+ * @dst_addr: destination IP address to match
+ */
+ u8 dst_addr[IPV6_ADDR_SIZE];
+
+ /**
+ * @src_port: source TCP port to match
+ */
+ __le16 src_port;
+
+ /**
+ * @dst_port: destination TCP port to match
+ */
+ __le16 dst_port;
+} __packed; /* WOWLAN_IPV6_TCP_SYN_API_S_VER_1 */
+
+/**
+ * union iwl_wowlan_pattern_data - Data for the different pattern types
+ *
+ * If wildcard addresses/ports are to be used, the union can be left
+ * undefined.
+ */
+union iwl_wowlan_pattern_data {
+ /**
+ * @bitmask: bitmask pattern data
+ */
+ struct iwl_wowlan_pattern_v1 bitmask;
+
+ /**
+ * @ipv4_tcp_syn: IPv4 TCP SYN pattern data
+ */
+ struct iwl_wowlan_ipv4_tcp_syn ipv4_tcp_syn;
+
+ /**
+ * @ipv6_tcp_syn: IPv6 TCP SYN pattern data
+ */
+ struct iwl_wowlan_ipv6_tcp_syn ipv6_tcp_syn;
+}; /* WOWLAN_PATTERN_API_U_VER_1 */
+
+/**
+ * struct iwl_wowlan_pattern_v2 - Pattern entry for the WoWLAN wakeup patterns
+ */
+struct iwl_wowlan_pattern_v2 {
+ /**
+ * @pattern_type: defines the struct type to be used in the union
+ */
+ u8 pattern_type;
+
+ /**
+ * @reserved: reserved for alignment
+ */
+ u8 reserved[3];
+
+ /**
+ * @u: the union containing the match data, or undefined for
+ * wildcard matches
+ */
+ union iwl_wowlan_pattern_data u;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_2 */
+
+/**
+ * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns command
+ */
+struct iwl_wowlan_patterns_cmd {
+ /**
+ * @n_patterns: number of patterns
+ */
+ __le32 n_patterns;
+
+ /**
+ * @patterns: the patterns, array length in @n_patterns
+ */
+ struct iwl_wowlan_pattern_v2 patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_2 */
+
enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_MAGIC_PACKET = BIT(0),
IWL_WOWLAN_WAKEUP_PATTERN_MATCH = BIT(1),
@@ -383,7 +503,11 @@ enum iwl_wowlan_wakeup_reason {
IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER = BIT(14),
IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN = BIT(15),
IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN = BIT(16),
-
+ IWL_WAKEUP_BY_11W_UNPROTECTED_DEAUTH_OR_DISASSOC = BIT(17),
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN = BIT(18),
+ IWL_WAKEUP_BY_PATTERN_IPV4_TCP_SYN_WILDCARD = BIT(19),
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN = BIT(20),
+ IWL_WAKEUP_BY_PATTERN_IPV6_TCP_SYN_WILDCARD = BIT(21),
}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
struct iwl_wowlan_gtk_status_v1 {
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 33858787817b..f4202bc231a6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -60,12 +60,13 @@
#include <linux/bitops.h>
-/*
+/**
* struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
+ *
* @tlv_version: version info
* @apply_point: &enum iwl_fw_ini_apply_point
* @data: TLV data followed
- **/
+ */
struct iwl_fw_ini_header {
__le32 tlv_version;
__le32 apply_point;
@@ -73,7 +74,7 @@ struct iwl_fw_ini_header {
} __packed; /* FW_DEBUG_TLV_HEADER_S */
/**
- * struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION)
+ * struct iwl_fw_ini_allocation_tlv - (IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION)
* buffer allocation TLV - for debug
*
* @iwl_fw_ini_header: header
@@ -84,7 +85,7 @@ struct iwl_fw_ini_header {
* @max_fragments: the maximum allowed fragmentation in the desired memory
* allocation above
* @min_frag_size: the minimum allowed fragmentation size in bytes
-*/
+ */
struct iwl_fw_ini_allocation_tlv {
struct iwl_fw_ini_header header;
__le32 allocation_id;
@@ -95,33 +96,52 @@ struct iwl_fw_ini_allocation_tlv {
} __packed; /* FW_DEBUG_TLV_BUFFER_ALLOCATION_TLV_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD)
- * Generic Host command pass through TLV
+ * enum iwl_fw_ini_dbg_domain - debug domains
+ * allows sending a host cmd or collecting a memory region if a given domain is enabled
+ *
+ * @IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON: the default domain, always on
+ * @IWL_FW_INI_DBG_DOMAIN_REPORT_PS: power save domain
+ */
+enum iwl_fw_ini_dbg_domain {
+ IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON = 0,
+ IWL_FW_INI_DBG_DOMAIN_REPORT_PS,
+}; /* FW_DEBUG_TLV_DOMAIN_API_E_VER_1 */
+
+/**
+ * struct iwl_fw_ini_hcmd
*
* @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC
* @group: the desired cmd group
- * @padding: all zeros for dword alignment
- * @data: all of the relevant command (0xf6/0xf5) to be sent
-*/
+ * @reserved: to align to FW struct
+ * @data: all of the relevant command data to be sent
+ */
struct iwl_fw_ini_hcmd {
u8 id;
u8 group;
- __le16 padding;
+ __le16 reserved;
u8 data[0];
-} __packed; /* FW_DEBUG_TLV_HCMD_DATA_S */
+} __packed; /* FW_DEBUG_TLV_HCMD_DATA_API_S_VER_1 */
/**
- * struct iwl_fw_ini_hcmd_tlv
+ * struct iwl_fw_ini_hcmd_tlv - (IWL_UCODE_TLV_TYPE_HCMD)
+ * Generic Host command pass through TLV
+ *
* @header: header
+ * @domain: send command only if the specific domain is enabled
+ * &enum iwl_fw_ini_dbg_domain
+ * @period_msec: period in which the hcmd will be sent to FW. Measured in msec
+ * (0 = one time command).
* @hcmd: a variable length host-command to be sent to apply the configuration.
*/
struct iwl_fw_ini_hcmd_tlv {
struct iwl_fw_ini_header header;
+ __le32 domain;
+ __le32 period_msec;
struct iwl_fw_ini_hcmd hcmd;
-} __packed; /* FW_DEBUG_TLV_HCMD_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_HCMD_API_S_VER_1 */
-/*
- * struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW)
+/**
+ * struct iwl_fw_ini_debug_flow_tlv - (IWL_UCODE_TLV_TYPE_DEBUG_FLOW)
*
* @header: header
* @debug_flow_cfg: &enum iwl_fw_ini_debug_flow
@@ -135,7 +155,19 @@ struct iwl_fw_ini_debug_flow_tlv {
#define IWL_FW_INI_MAX_NAME 32
/**
+ * struct iwl_fw_ini_region_cfg_dhc - defines dhc response to dump.
+ *
+ * @id_and_grp: id and group of dhc response.
+ * @desc: dhc response descriptor.
+ */
+struct iwl_fw_ini_region_cfg_dhc {
+ __le32 id_and_grp;
+ __le32 desc;
+} __packed; /* FW_DEBUG_TLV_REGION_DHC_API_S_VER_1 */
+
+/**
* struct iwl_fw_ini_region_cfg_internal - meta data of internal memory region
+ *
* @num_of_range: the amount of ranges in the region
* @range_data_size: size of the data to read per range, in bytes.
*/
@@ -146,6 +178,7 @@ struct iwl_fw_ini_region_cfg_internal {
/**
* struct iwl_fw_ini_region_cfg_fifos - meta data of fifos region
+ *
* @fid1: fifo id 1 - bitmap of lmac tx/rx fifos to include in the region
* @fid2: fifo id 2 - bitmap of umac rx fifos to include in the region.
* It is unused for tx.
@@ -163,34 +196,43 @@ struct iwl_fw_ini_region_cfg_fifos {
/**
* struct iwl_fw_ini_region_cfg
+ *
* @region_id: ID of this dump configuration
* @region_type: &enum iwl_fw_ini_region_type
- * @num_regions: amount of regions in the address array.
+ * @domain: dump this region only if the specific domain is enabled
+ * &enum iwl_fw_ini_dbg_domain
* @name_len: name length
* @name: file name to use for this region
* @internal: used in case the region uses internal memory.
* @allocation_id: For DRAM type field substitutes for allocation_id
* @fifos: used in case of fifos region.
+ * @dhc_desc: dhc response descriptor.
+ * @notif_id_and_grp: dump this region only if the specific notification
+ * occurred.
* @offset: offset to use for each memory base address
* @start_addr: array of addresses.
*/
struct iwl_fw_ini_region_cfg {
__le32 region_id;
__le32 region_type;
+ __le32 domain;
__le32 name_len;
u8 name[IWL_FW_INI_MAX_NAME];
union {
struct iwl_fw_ini_region_cfg_internal internal;
__le32 allocation_id;
struct iwl_fw_ini_region_cfg_fifos fifos;
- };
+ struct iwl_fw_ini_region_cfg_dhc dhc_desc;
+ __le32 notif_id_and_grp;
+ }; /* FW_DEBUG_TLV_REGION_EXT_INT_PARAMS_API_U_VER_1 */
__le32 offset;
__le32 start_addr[];
-} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_S */
+} __packed; /* FW_DEBUG_TLV_REGION_CONFIG_API_S_VER_1 */
/**
- * struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG)
- * DUMP sections define IDs and triggers that use those IDs TLV
+ * struct iwl_fw_ini_region_tlv - (IWL_UCODE_TLV_TYPE_REGIONS)
+ * defines memory regions to dump
+ *
* @header: header
* @num_regions: how many different region section and IDs are coming next
* @region_config: list of dump configurations
@@ -199,13 +241,12 @@ struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_header header;
__le32 num_regions;
struct iwl_fw_ini_region_cfg region_config[];
-} __packed; /* FW_DEBUG_TLV_REGIONS_S_VER_1 */
+} __packed; /* FW_DEBUG_TLV_REGIONS_API_S_VER_1 */
/**
- * struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG)
- * Region sections define IDs and triggers that use those IDs TLV
+ * struct iwl_fw_ini_trigger
*
- * @trigger_id: enum &iwl_fw_ini_tigger_id
+ * @trigger_id: &enum iwl_fw_ini_trigger_id
* @override_trig: determines how apply trigger in case a trigger with the
* same id is already in use. Using the first 2 bytes:
* Byte 0: if 0, override trigger configuration, otherwise use the
@@ -214,6 +255,7 @@ struct iwl_fw_ini_region_tlv {
* existing trigger.
* @dump_delay: delay from trigger fire to dump, in usec
* @occurrences: max amount of times to be fired
+ * @reserved: to align to FW struct
* @ignore_consec: ignore consecutive triggers, in usec
* @force_restart: force FW restart
* @multi_dut: initiate debug dump data on several DUTs
@@ -226,17 +268,18 @@ struct iwl_fw_ini_trigger {
__le32 override_trig;
__le32 dump_delay;
__le32 occurrences;
+ __le32 reserved;
__le32 ignore_consec;
__le32 force_restart;
__le32 multi_dut;
__le32 trigger_data;
__le32 num_regions;
__le32 data[];
-} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_S */
+} __packed; /* FW_TLV_DEBUG_TRIGGER_CONFIG_API_S_VER_1 */
/**
- * struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG)
- * DUMP sections define IDs and triggers that use those IDs TLV
+ * struct iwl_fw_ini_trigger_tlv - (IWL_UCODE_TLV_TYPE_TRIGGERS)
+ * Triggers that hold memory regions to dump in case a trigger fires
*
* @header: header
* @num_triggers: how many different triggers section and IDs are coming next
@@ -246,16 +289,18 @@ struct iwl_fw_ini_trigger_tlv {
struct iwl_fw_ini_header header;
__le32 num_triggers;
struct iwl_fw_ini_trigger trigger_config[];
-} __packed; /* FW_TLV_DEBUG_TRIGGERS_S_VER_1 */
+} __packed; /* FW_TLV_DEBUG_TRIGGERS_API_S_VER_1 */
/**
* enum iwl_fw_ini_trigger_id
+ *
* @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
* @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
* @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
* @IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER: FW debug notification
- * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION: FW generic notification
+ * @IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION: FW generic notification
* @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
+ * @IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER: triggers periodically
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
* @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
* threshold was crossed
@@ -299,47 +344,51 @@ enum iwl_fw_ini_trigger_id {
/* FW triggers */
IWL_FW_TRIGGER_ID_FW_DEBUG_HOST_TRIGGER = 4,
- IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFOCATION = 5,
+ IWL_FW_TRIGGER_ID_FW_GENERIC_NOTIFICATION = 5,
/* User trigger */
IWL_FW_TRIGGER_ID_USER_TRIGGER = 6,
+ /* periodic uses the data field for the interval time */
+ IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER = 7,
+
/* Host triggers */
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 7,
- IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 8,
- IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 9,
- IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 10,
- IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 11,
- IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 12,
- IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 13,
- IWL_FW_TRIGGER_ID_HOST_SCAN_START = 14,
- IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 15,
- IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 16,
- IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 17,
- IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 18,
- IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 19,
- IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 20,
- IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 21,
- IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 22,
- IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 23,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 24,
- IWL_FW_TRIGGER_ID_HOST_D3_START = 25,
- IWL_FW_TRIGGER_ID_HOST_D3_END = 26,
- IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 27,
- IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 28,
- IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 29,
- IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 30,
- IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 31,
- IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 32,
- IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 33,
- IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 34,
- IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 35,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 8,
+ IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 9,
+ IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 10,
+ IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 11,
+ IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 12,
+ IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 13,
+ IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 14,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_START = 15,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 16,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 17,
+ IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 18,
+ IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 19,
+ IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 20,
+ IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 21,
+ IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 22,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 23,
+ IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 24,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 25,
+ IWL_FW_TRIGGER_ID_HOST_D3_START = 26,
+ IWL_FW_TRIGGER_ID_HOST_D3_END = 27,
+ IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 28,
+ IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 29,
+ IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 30,
+ IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 31,
+ IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 32,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 33,
+ IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 34,
+ IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 35,
+ IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 36,
IWL_FW_TRIGGER_ID_NUM,
}; /* FW_DEBUG_TLV_TRIGGER_ID_E_VER_1 */
/**
* enum iwl_fw_ini_apply_point
+ *
* @IWL_FW_INI_APPLY_INVALID: invalid
* @IWL_FW_INI_APPLY_EARLY: pre loading FW
* @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive
@@ -360,6 +409,7 @@ enum iwl_fw_ini_apply_point {
/**
* enum iwl_fw_ini_allocation_id
+ *
* @IWL_FW_INI_ALLOCATION_INVALID: invalid
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
@@ -380,18 +430,22 @@ enum iwl_fw_ini_allocation_id {
/**
* enum iwl_fw_ini_buffer_location
+ *
* @IWL_FW_INI_LOCATION_INVALID: invalid
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
+ * @IWL_FW_INI_LOCATION_NPK_PATH: NPK location
*/
enum iwl_fw_ini_buffer_location {
IWL_FW_INI_LOCATION_INVALID,
IWL_FW_INI_LOCATION_SRAM_PATH,
IWL_FW_INI_LOCATION_DRAM_PATH,
+ IWL_FW_INI_LOCATION_NPK_PATH,
}; /* FW_DEBUG_TLV_BUFFER_LOCATION_E_VER_1 */
/**
* enum iwl_fw_ini_debug_flow
+ *
* @IWL_FW_INI_DEBUG_INVALID: invalid
* @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
* @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
@@ -404,6 +458,7 @@ enum iwl_fw_ini_debug_flow {
/**
* enum iwl_fw_ini_region_type
+ *
* @IWL_FW_INI_REGION_INVALID: invalid
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
@@ -416,6 +471,10 @@ enum iwl_fw_ini_debug_flow {
* @IWL_FW_INI_REGION_RXF: RX fifo
* @IWL_FW_INI_REGION_PAGING: paging memory
* @IWL_FW_INI_REGION_CSR: CSR registers
+ * @IWL_FW_INI_REGION_NOTIFICATION: FW notification data
+ * @IWL_FW_INI_REGION_DHC: dhc response to dump
+ * @IWL_FW_INI_REGION_LMAC_ERROR_TABLE: lmac error table
+ * @IWL_FW_INI_REGION_UMAC_ERROR_TABLE: umac error table
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
@@ -431,6 +490,10 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_RXF,
IWL_FW_INI_REGION_PAGING,
IWL_FW_INI_REGION_CSR,
+ IWL_FW_INI_REGION_NOTIFICATION,
+ IWL_FW_INI_REGION_DHC,
+ IWL_FW_INI_REGION_LMAC_ERROR_TABLE,
+ IWL_FW_INI_REGION_UMAC_ERROR_TABLE,
IWL_FW_INI_REGION_NUM
}; /* FW_DEBUG_TLV_REGION_TYPE_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 5dddb21c1c4d..8d78b0e671c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -675,6 +675,59 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_3 */
/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v4 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used in for this AP measurement
+ * @specific_calib: val that was used in for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
+
+/**
* struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
@@ -704,6 +757,8 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v3 {
* @specific_calib: val that was used in for this AP measurement
* @papd_calib_output: The result of the tof papd calibration that was injected
* into the algorithm.
+ * @rttConfidence: a value between 0 and 31 that represents the rtt accuracy.
+ * @reserved: for alignment
*/
struct iwl_tof_range_rsp_ap_entry_ntfy {
u8 bssid[ETH_ALEN];
@@ -725,7 +780,9 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
__le16 common_calib;
__le16 specific_calib;
__le32 papd_calib_output;
-} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
+ u8 rttConfidence;
+ u8 reserved[3];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
* enum iwl_tof_response_status - tof response status
@@ -761,6 +818,22 @@ struct iwl_tof_range_rsp_ntfy_v5 {
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_5 */
/**
+ * struct iwl_tof_range_rsp_ntfy_v6 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v6 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
+
+/**
* struct iwl_tof_range_rsp_ntfy - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
@@ -774,7 +847,7 @@ struct iwl_tof_range_rsp_ntfy {
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 941c50477003..85c5e367cbf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -542,6 +542,66 @@ enum iwl_he_htc_flags {
#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
/**
+ * struct iwl_he_sta_context_cmd_v1 - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see %iwl_11ax_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd_v1 {
+ u8 sta_id;
+ u8 tid_limit;
+ u8 reserved1;
+ u8 reserved2;
+ __le32 flags;
+
+ /* The below fields are set via Multiple BSSID IE */
+ u8 ref_bssid_addr[6];
+ __le16 reserved0;
+
+ /* The below fields are set via HE-capabilities IE */
+ __le32 htc_flags;
+
+ u8 frag_flags;
+ u8 frag_level;
+ u8 frag_max_num;
+ u8 frag_min_size;
+
+ /* The below fields are set via PPE thresholds element */
+ struct iwl_he_pkt_ext pkt_ext;
+
+ /* The below fields are set via HE-Operation IE */
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ __le16 frame_time_rts_th;
+
+ /* Random access parameter set (i.e. RAPS) */
+ u8 rand_alloc_ecwmin;
+ u8 rand_alloc_ecwmax;
+ __le16 reserved3;
+
+ /* The below fields are set via MU EDCA parameter set element */
+ struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_1 */
+
+/**
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
@@ -564,6 +624,14 @@ enum iwl_he_htc_flags {
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
* @reserved3: reserved byte for future use
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ * @max_bssid_indicator: indicator of the max bssid supported on the associated
+ * bss
+ * @bssid_index: index of the associated VAP
+ * @ema_ap: AP supports enhanced Multi BSSID advertisement
+ * @profile_periodicity: number of Beacon periods that are needed to receive the
+ * complete VAPs info
+ * @bssid_count: actual number of VAPs in the MultiBSS Set
+ * @reserved4: alignment
*/
struct iwl_he_sta_context_cmd {
u8 sta_id;
@@ -599,7 +667,14 @@ struct iwl_he_sta_context_cmd {
/* The below fields are set via MU EDCA parameter set element */
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
-} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+ u8 ema_ap;
+ u8 profile_periodicity;
+ u8 bssid_count;
+ u8 reserved4[3];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S_VER_2 */
/**
* struct iwl_he_monitor_cmd - configure air sniffer for HE
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 93b392f0c6a4..97b49843e318 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -233,7 +233,8 @@ struct iwl_nvm_get_info_phy {
__le32 rx_chains;
} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
-#define IWL_NUM_CHANNELS (51)
+#define IWL_NUM_CHANNELS_V1 51
+#define IWL_NUM_CHANNELS 110
/**
* struct iwl_nvm_get_info_regulatory - regulatory information
@@ -241,13 +242,39 @@ struct iwl_nvm_get_info_phy {
* @channel_profile: regulatory data of this channel
* @reserved: reserved
*/
-struct iwl_nvm_get_info_regulatory {
+struct iwl_nvm_get_info_regulatory_v1 {
__le32 lar_enabled;
- __le16 channel_profile[IWL_NUM_CHANNELS];
+ __le16 channel_profile[IWL_NUM_CHANNELS_V1];
__le16 reserved;
} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
/**
+ * struct iwl_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @n_channels: number of valid channels in the array
+ * @channel_profile: regulatory data of this channel
+ */
+struct iwl_nvm_get_info_regulatory {
+ __le32 lar_enabled;
+ __le32 n_channels;
+ __le32 channel_profile[IWL_NUM_CHANNELS];
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
+
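
Editor's note: with VER_2 the channel profile entries widen to __le32 and gain an explicit n_channels count. A hedged sketch of how a consumer might walk the array, clamping the count to the new IWL_NUM_CHANNELS bound (the function name is illustrative only):

static void example_parse_regulatory(const struct iwl_nvm_get_info_regulatory *regd)
{
	u32 i, n = min_t(u32, le32_to_cpu(regd->n_channels), IWL_NUM_CHANNELS);

	for (i = 0; i < n; i++) {
		u32 ch_flags = le32_to_cpu(regd->channel_profile[i]);

		/* ... translate ch_flags into regulatory rules ... */
		(void)ch_flags;
	}
}
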
+/**
+ * struct iwl_nvm_get_info_rsp_v3 - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp_v3 {
+ struct iwl_nvm_get_info_general general;
+ struct iwl_nvm_get_info_sku mac_sku;
+ struct iwl_nvm_get_info_phy phy_sku;
+ struct iwl_nvm_get_info_regulatory_v1 regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+
+/**
* struct iwl_nvm_get_info_rsp - response to get NVM data
* @general: general NVM data
* @mac_sku: data relating to MAC sku
@@ -259,7 +286,7 @@ struct iwl_nvm_get_info_rsp {
struct iwl_nvm_get_info_sku mac_sku;
struct iwl_nvm_get_info_phy phy_sku;
struct iwl_nvm_get_info_regulatory regulatory;
-} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */
/**
* struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 6e8224ce8906..d55312ef58c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -688,13 +688,6 @@ struct iwl_rx_mpdu_desc {
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
-#define IWL_CD_STTS_OPTIMIZED_POS 0
-#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
-#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
-#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
-#define IWL_CD_STTS_WIFI_STATUS_POS 4
-#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
-
#define RX_NO_DATA_CHAIN_A_POS 0
#define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS)
#define RX_NO_DATA_CHAIN_B_POS 8
@@ -747,62 +740,6 @@ struct iwl_rx_no_data {
__le32 rx_vec[2];
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
-/**
- * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
- * @IWL_CD_STTS_UNUSED: unused
- * @IWL_CD_STTS_UNUSED_2: unused
- * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
- * In sniffer mode, when split is used, set in last CD completion. (RX)
- * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
- * all CD completion. (RX)
- * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
- * @IWL_CD_STTS_ERROR: general error (RX)
- */
-enum iwl_completion_desc_transfer_status {
- IWL_CD_STTS_UNUSED,
- IWL_CD_STTS_UNUSED_2,
- IWL_CD_STTS_END_TRANSFER,
- IWL_CD_STTS_OVERFLOW,
- IWL_CD_STTS_ABORTED,
- IWL_CD_STTS_ERROR,
-};
-
-/**
- * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
- * @IWL_CD_STTS_VALID: the packet is valid (RX)
- * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
- * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
- * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
- * @IWL_CD_STTS_DUP: duplicate packet (RX)
- * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
- * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
- * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
- * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
- * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
- * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
- * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
- * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
- * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
- * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
- */
-enum iwl_completion_desc_wifi_status {
- IWL_CD_STTS_VALID,
- IWL_CD_STTS_FCS_ERR,
- IWL_CD_STTS_SEC_KEY_ERR,
- IWL_CD_STTS_DECRYPTION_ERR,
- IWL_CD_STTS_DUP,
- IWL_CD_STTS_ICV_MIC_ERR,
- IWL_CD_STTS_INTERNAL_SNAP_ERR,
- IWL_CD_STTS_SEC_PORT_FAIL,
- IWL_CD_STTS_BA_OLD_SN,
- IWL_CD_STTS_QOS_NULL,
- IWL_CD_STTS_MAC_HDR_ERR,
- IWL_CD_STTS_MAX_RETRANS,
- IWL_CD_STTS_EX_LIFETIME,
- IWL_CD_STTS_NOT_USED,
- IWL_CD_STTS_REPLAY_ERR,
-};
-
struct iwl_frame_release {
u8 baid;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 890a939c463d..1a67a2a439ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -788,7 +788,53 @@ struct iwl_umac_scan_complete {
__le32 reserved;
} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
-#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 5
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 7
+
+/**
+ * struct iwl_scan_offload_profile_match_v1 - match information
+ * @bssid: matched bssid
+ * @reserved: reserved
+ * @channel: channel where the match occurred
+ * @energy: energy
+ * @matching_feature: feature matches
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request.
+ */
+struct iwl_scan_offload_profile_match_v1 {
+ u8 bssid[ETH_ALEN];
+ __le16 reserved;
+ u8 channel;
+ u8 energy;
+ u8 matching_feature;
+ u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwl_scan_offload_profiles_query_v1 - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_profiles_query_v1 {
+ __le32 matched_profiles;
+ __le32 last_scan_age;
+ __le32 n_scans_done;
+ __le32 gp2_d0u;
+ __le32 gp2_invoked;
+ u8 resume_while_scanning;
+ u8 self_recovery;
+ __le16 reserved;
+ struct iwl_scan_offload_profile_match_v1 matches[IWL_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
/**
* struct iwl_scan_offload_profile_match - match information
* @bssid: matched bssid
@@ -797,7 +843,7 @@ struct iwl_umac_scan_complete {
* @energy: energy
* @matching_feature: feature matches
* @matching_channels: bitmap of channels that matched, referencing
- * the channels passed in tue scan offload request
+ * the channels passed in the scan offload request.
*/
struct iwl_scan_offload_profile_match {
u8 bssid[ETH_ALEN];
@@ -806,7 +852,7 @@ struct iwl_scan_offload_profile_match {
u8 energy;
u8 matching_feature;
u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
-} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_2 */
/**
* struct iwl_scan_offload_profiles_query - match results query response
@@ -831,7 +877,7 @@ struct iwl_scan_offload_profiles_query {
u8 self_recovery;
__le16 reserved;
struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
-} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_3 */
/**
* struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
index 6ac240b6eace..73196cbc7fbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -8,6 +8,7 @@
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -133,6 +135,7 @@ enum iwl_tx_queue_cfg_actions {
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
+#define IWL_CMD_QUEUE_SIZE 32
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index d7380016f1c0..5f52e40a2903 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -545,6 +545,7 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a04590, .end = 0x00a04590 },
{ .start = 0x00a04598, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
+ { .start = 0x00a05c18, .end = 0x00a05c1c },
{ .start = 0x00a0c000, .end = 0x00a0c018 },
{ .start = 0x00a0c020, .end = 0x00a0c028 },
{ .start = 0x00a0c038, .end = 0x00a0c094 },
@@ -557,6 +558,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
};
+static const struct iwl_prph_range iwl_prph_dump_addr_ax210[] = {
+ { .start = 0x00d03c00, .end = 0x00d03c64 },
+ { .start = 0x00d05c18, .end = 0x00d05c1c },
+ { .start = 0x00d0c000, .end = 0x00d0c174 },
+};
+
static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
u32 len_bytes, __le32 *data)
{
@@ -675,7 +682,8 @@ static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
u32 range_len;
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- /* TODO */
+ range_len = ARRAY_SIZE(iwl_prph_dump_addr_ax210);
+ handler(fwrt, iwl_prph_dump_addr_ax210, range_len, ptr);
} else if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
@@ -804,8 +812,8 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
}
static struct iwl_fw_error_dump_file *
-_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump)
+iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_dump_ptrs *fw_error_dump)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
@@ -909,11 +917,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
dump_data->len = cpu_to_le32(sizeof(*dump_info));
dump_info = (void *)dump_data->data;
- dump_info->device_family =
- fwrt->trans->cfg->device_family ==
- IWL_DEVICE_FAMILY_7000 ?
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
- cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+ dump_info->hw_type =
+ cpu_to_le32(CSR_HW_REV_TYPE(fwrt->trans->hw_rev));
dump_info->hw_step =
cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
@@ -967,10 +972,11 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (fifo_len) {
iwl_fw_dump_rxf(fwrt, &dump_data);
iwl_fw_dump_txf(fwrt, &dump_data);
- if (radio_len)
- iwl_read_radio_regs(fwrt, &dump_data);
}
+ if (radio_len)
+ iwl_read_radio_regs(fwrt, &dump_data);
+
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
fwrt->dump.desc) {
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
@@ -1049,14 +1055,14 @@ static int iwl_dump_ini_prph_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr, prph_val, offset = le32_to_cpu(reg->offset);
+ u32 prph_val;
+ u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
int i;
- range->start_addr = reg->start_addr[idx];
+ range->start_addr = cpu_to_le64(addr);
range->range_data_size = reg->internal.range_data_size;
for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
- addr = le32_to_cpu(range->start_addr) + i;
- prph_val = iwl_read_prph(fwrt->trans, addr + offset);
+ prph_val = iwl_read_prph(fwrt->trans, addr + i);
if (prph_val == 0x5a5a5a5a)
return -EBUSY;
*val++ = cpu_to_le32(prph_val);
@@ -1071,16 +1077,13 @@ static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 addr, offset = le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
int i;
- range->start_addr = reg->start_addr[idx];
+ range->start_addr = cpu_to_le64(addr);
range->range_data_size = reg->internal.range_data_size;
- for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4) {
- addr = le32_to_cpu(range->start_addr) + i;
- *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans,
- addr + offset));
- }
+ for (i = 0; i < le32_to_cpu(reg->internal.range_data_size); i += 4)
+ *val++ = cpu_to_le32(iwl_trans_read32(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1090,12 +1093,11 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
void *range_ptr, int idx)
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
- u32 addr = le32_to_cpu(range->start_addr);
- u32 offset = le32_to_cpu(reg->offset);
+ u32 addr = le32_to_cpu(reg->start_addr[idx]) + le32_to_cpu(reg->offset);
- range->start_addr = reg->start_addr[idx];
+ range->start_addr = cpu_to_le64(addr);
range->range_data_size = reg->internal.range_data_size;
- iwl_trans_read_mem_bytes(fwrt->trans, addr + offset, range->data,
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
le32_to_cpu(reg->internal.range_data_size));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
@@ -1109,7 +1111,7 @@ iwl_dump_ini_paging_gen2_iter(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_error_dump_range *range = range_ptr;
u32 page_size = fwrt->trans->init_dram.paging[idx].size;
- range->start_addr = cpu_to_le32(idx);
+ range->start_addr = cpu_to_le64(idx);
range->range_data_size = cpu_to_le32(page_size);
memcpy(range->data, fwrt->trans->init_dram.paging[idx].block,
page_size);
@@ -1129,7 +1131,7 @@ static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime *fwrt,
dma_addr_t addr = fwrt->fw_paging_db[idx].fw_paging_phys;
u32 page_size = fwrt->fw_paging_db[idx].fw_paging_size;
- range->start_addr = cpu_to_le32(idx);
+ range->start_addr = cpu_to_le64(idx);
range->range_data_size = cpu_to_le32(page_size);
dma_sync_single_for_cpu(fwrt->trans->dev, addr, page_size,
DMA_BIDIRECTIONAL);
@@ -1152,7 +1154,7 @@ iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime *fwrt,
if (start_addr == 0x5a5a5a5a)
return -EBUSY;
- range->start_addr = cpu_to_le32(start_addr);
+ range->start_addr = cpu_to_le64(start_addr);
range->range_data_size = cpu_to_le32(fwrt->trans->fw_mon[idx].size);
memcpy(range->data, fwrt->trans->fw_mon[idx].block,
@@ -1228,10 +1230,11 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
struct iwl_ini_txf_iter_data *iter;
+ struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
u32 offs = le32_to_cpu(reg->offset), addr;
u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
- __le32 *val = range->data;
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ __le32 *data;
unsigned long flags;
int i;
@@ -1249,11 +1252,18 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
iwl_write_prph_no_grab(fwrt->trans, TXF_LARC_NUM + offs, iter->fifo);
- /* read txf registers */
+ /*
+	 * Read the txf registers. For each register, write its address
+	 * and value to the dump.
+ */
for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
addr = le32_to_cpu(reg->start_addr[i]) + offs;
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ reg_dump->addr = cpu_to_le32(addr);
+ reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
+ addr));
+
+ reg_dump++;
}
if (reg->fifos.header_only) {
@@ -1270,8 +1280,9 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
/* Read FIFO */
addr = TXF_READ_MODIFY_DATA + offs;
- for (i = 0; i < iter->fifo_size; i += sizeof(__le32))
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ data = (void *)reg_dump;
+ for (i = 0; i < iter->fifo_size; i += sizeof(*data))
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
iwl_trans_release_nic_access(fwrt->trans, &flags);
@@ -1327,10 +1338,11 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_fifo_error_dump_range *range = range_ptr;
struct iwl_ini_rxf_data rxf_data;
+ struct iwl_fw_ini_error_dump_register *reg_dump = (void *)range->data;
u32 offs = le32_to_cpu(reg->offset), addr;
u32 registers_size =
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
- __le32 *val = range->data;
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(*reg_dump);
+ __le32 *data;
unsigned long flags;
int i;
@@ -1341,17 +1353,22 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
return -EBUSY;
- offs += rxf_data.offset;
-
range->fifo_num = cpu_to_le32(rxf_data.fifo_num);
range->num_of_registers = reg->fifos.num_of_registers;
range->range_data_size = cpu_to_le32(rxf_data.size + registers_size);
- /* read rxf registers */
+ /*
+	 * Read the rxf registers. For each register, write its address
+	 * and value to the dump.
+ */
for (i = 0; i < le32_to_cpu(reg->fifos.num_of_registers); i++) {
addr = le32_to_cpu(reg->start_addr[i]) + offs;
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ reg_dump->addr = cpu_to_le32(addr);
+ reg_dump->data = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans,
+ addr));
+
+ reg_dump++;
}
if (reg->fifos.header_only) {
@@ -1359,6 +1376,12 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
goto out;
}
+ /*
+	 * The region registers hold absolute addresses, so apply the rxf
+	 * offset only after reading them.
+ */
+ offs += rxf_data.offset;
+
/* Lock fence */
iwl_write_prph_no_grab(fwrt->trans, RXF_SET_FENCE_MODE + offs, 0x1);
/* Set fence pointer to the same place like WR pointer */
@@ -1369,8 +1392,9 @@ static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime *fwrt,
/* Read FIFO */
addr = RXF_FIFO_RD_FENCE_INC + offs;
- for (i = 0; i < rxf_data.size; i += sizeof(__le32))
- *val++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ data = (void *)reg_dump;
+ for (i = 0; i < rxf_data.size; i += sizeof(*data))
+ *data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
out:
iwl_trans_release_nic_access(fwrt->trans, &flags);
@@ -1384,32 +1408,86 @@ static void *iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_MEM_VER);
+
return dump->ranges;
}
static void
-*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_ini_region_cfg *reg,
- void *data)
+*iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ struct iwl_fw_ini_monitor_dump *data,
+ u32 write_ptr_addr, u32 write_ptr_msk,
+ u32 cycle_cnt_addr, u32 cycle_cnt_msk)
{
- struct iwl_fw_ini_monitor_dram_dump *mon_dump = (void *)data;
u32 write_ptr, cycle_cnt;
unsigned long flags;
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) {
- IWL_ERR(fwrt, "Failed to get DRAM monitor header\n");
+ IWL_ERR(fwrt, "Failed to get monitor header\n");
return NULL;
}
- write_ptr = iwl_read_umac_prph_no_grab(fwrt->trans,
- MON_BUFF_WRPTR_VER2);
- cycle_cnt = iwl_read_umac_prph_no_grab(fwrt->trans,
- MON_BUFF_CYCLE_CNT_VER2);
+
+ write_ptr = iwl_read_prph_no_grab(fwrt->trans, write_ptr_addr);
+ cycle_cnt = iwl_read_prph_no_grab(fwrt->trans, cycle_cnt_addr);
+
iwl_trans_release_nic_access(fwrt->trans, &flags);
- mon_dump->write_ptr = cpu_to_le32(write_ptr);
- mon_dump->cycle_cnt = cpu_to_le32(cycle_cnt);
+ data->header.version = cpu_to_le32(IWL_INI_DUMP_MONITOR_VER);
+ data->write_ptr = cpu_to_le32(write_ptr & write_ptr_msk);
+ data->cycle_cnt = cpu_to_le32(cycle_cnt & cycle_cnt_msk);
+
+ return data->ranges;
+}
+
+static void
+*iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ u32 write_ptr_addr, write_ptr_msk, cycle_cnt_addr, cycle_cnt_msk;
+
+ switch (fwrt->trans->cfg->device_family) {
+ case IWL_DEVICE_FAMILY_9000:
+ case IWL_DEVICE_FAMILY_22000:
+ write_ptr_addr = MON_BUFF_WRPTR_VER2;
+ write_ptr_msk = -1;
+ cycle_cnt_addr = MON_BUFF_CYCLE_CNT_VER2;
+ cycle_cnt_msk = -1;
+ break;
+ default:
+ IWL_ERR(fwrt, "Unsupported device family %d\n",
+ fwrt->trans->cfg->device_family);
+ return NULL;
+ }
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump, write_ptr_addr,
+ write_ptr_msk, cycle_cnt_addr,
+ cycle_cnt_msk);
+}
+
+static void
+*iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg,
+ void *data)
+{
+ struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+ const struct iwl_cfg *cfg = fwrt->trans->cfg;
+
+ if (fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_9000 &&
+ fwrt->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
+ IWL_ERR(fwrt, "Unsupported device family %d\n",
+ fwrt->trans->cfg->device_family);
+ return NULL;
+ }
+
+ return iwl_dump_ini_mon_fill_header(fwrt, reg, mon_dump,
+ cfg->fw_mon_smem_write_ptr_addr,
+ cfg->fw_mon_smem_write_ptr_msk,
+ cfg->fw_mon_smem_cycle_cnt_ptr_addr,
+ cfg->fw_mon_smem_cycle_cnt_ptr_msk);
- return mon_dump->ranges;
}
static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
@@ -1418,6 +1496,8 @@ static void *iwl_dump_ini_fifo_fill_header(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_fifo_error_dump *dump = data;
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_FIFO_VER);
+
return dump->ranges;
}
@@ -1509,7 +1589,8 @@ static u32 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime *fwrt,
static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg)
{
- u32 size = sizeof(struct iwl_fw_ini_monitor_dram_dump);
+ u32 size = sizeof(struct iwl_fw_ini_monitor_dump) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
if (fwrt->trans->num_blocks)
size += fwrt->trans->fw_mon[0].size;
@@ -1517,6 +1598,15 @@ static u32 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime *fwrt,
return size;
}
+static u32 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_fw_ini_region_cfg *reg)
+{
+ return sizeof(struct iwl_fw_ini_monitor_dump) +
+ iwl_dump_ini_mem_ranges(fwrt, reg) *
+ (sizeof(struct iwl_fw_ini_error_dump_range) +
+ le32_to_cpu(reg->internal.range_data_size));
+}
+
static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_region_cfg *reg)
{
@@ -1524,7 +1614,7 @@ static u32 iwl_dump_ini_txf_get_size(struct iwl_fw_runtime *fwrt,
void *fifo_iter = fwrt->dump.fifo_iter;
u32 size = 0;
u32 fifo_hdr = sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
fwrt->dump.fifo_iter = &iter;
while (iwl_ini_txf_iter(fwrt, reg)) {
@@ -1547,7 +1637,7 @@ static u32 iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_ini_rxf_data rx_data;
u32 size = sizeof(struct iwl_fw_ini_fifo_error_dump) +
sizeof(struct iwl_fw_ini_fifo_error_dump_range) +
- le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32);
+ le32_to_cpu(reg->fifos.num_of_registers) * sizeof(__le32) * 2;
if (reg->fifos.header_only)
return size;
@@ -1584,27 +1674,31 @@ struct iwl_dump_ini_mem_ops {
* @fwrt: fw runtime struct.
* @data: dump memory data.
* @reg: region to copy to the dump.
+ * @ops: memory dump operations.
*/
static void
iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_ini_region_type type,
struct iwl_fw_error_dump_data **data,
struct iwl_fw_ini_region_cfg *reg,
struct iwl_dump_ini_mem_ops *ops)
{
struct iwl_fw_ini_error_dump_header *header = (void *)(*data)->data;
+ u32 num_of_ranges, i, type = le32_to_cpu(reg->region_type);
void *range;
- u32 num_of_ranges, i;
if (WARN_ON(!ops || !ops->get_num_of_ranges || !ops->get_size ||
!ops->fill_mem_hdr || !ops->fill_range))
return;
+ IWL_DEBUG_FW(fwrt, "WRT: collecting region: id=%d, type=%d\n",
+ le32_to_cpu(reg->region_id), type);
+
num_of_ranges = ops->get_num_of_ranges(fwrt, reg);
(*data)->type = cpu_to_le32(type | INI_DUMP_BIT);
(*data)->len = cpu_to_le32(ops->get_size(fwrt, reg));
+ header->region_id = reg->region_id;
header->num_of_ranges = cpu_to_le32(num_of_ranges);
header->name_len = cpu_to_le32(min_t(int, IWL_FW_INI_MAX_NAME,
le32_to_cpu(reg->name_len)));
@@ -1612,7 +1706,8 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
range = ops->fill_mem_hdr(fwrt, reg, header);
if (!range) {
- IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
+ IWL_ERR(fwrt,
+ "WRT: failed to fill region header: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
memset(*data, 0, le32_to_cpu((*data)->len));
return;
@@ -1622,7 +1717,8 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
int range_size = ops->fill_range(fwrt, reg, range, i);
if (range_size < 0) {
- IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
+ IWL_ERR(fwrt,
+ "WRT: failed to dump region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
memset(*data, 0, le32_to_cpu((*data)->len));
return;
@@ -1643,23 +1739,30 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
for (i = 0; i < le32_to_cpu(trigger->num_regions); i++) {
u32 reg_id = le32_to_cpu(trigger->data[i]);
struct iwl_fw_ini_region_cfg *reg;
- enum iwl_fw_ini_region_type type;
if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)))
continue;
reg = fwrt->dump.active_regs[reg_id];
- if (WARN(!reg, "Unassigned region %d\n", reg_id))
+ if (!reg) {
+ IWL_WARN(fwrt,
+ "WRT: unassigned region id %d, skipping\n",
+ reg_id);
continue;
+ }
- type = le32_to_cpu(reg->region_type);
- switch (type) {
+		/* currently the driver only supports the always-on domain */
+ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
+ continue;
+
+ switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
case IWL_FW_INI_REGION_PERIPHERY_AUX:
- case IWL_FW_INI_REGION_INTERNAL_BUFFER:
case IWL_FW_INI_REGION_CSR:
+ case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
+ case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
size += hdr_len + iwl_dump_ini_mem_get_size(fwrt, reg);
break;
case IWL_FW_INI_REGION_TXF:
@@ -1668,7 +1771,7 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_REGION_RXF:
size += hdr_len + iwl_dump_ini_rxf_get_size(fwrt, reg);
break;
- case IWL_FW_INI_REGION_PAGING: {
+ case IWL_FW_INI_REGION_PAGING:
size += hdr_len;
if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
size += iwl_dump_ini_paging_get_size(fwrt, reg);
@@ -1677,13 +1780,16 @@ static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt,
reg);
}
break;
- }
case IWL_FW_INI_REGION_DRAM_BUFFER:
if (!fwrt->trans->num_blocks)
break;
size += hdr_len +
iwl_dump_ini_mon_dram_get_size(fwrt, reg);
break;
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ size += hdr_len +
+ iwl_dump_ini_mon_smem_get_size(fwrt, reg);
+ break;
case IWL_FW_INI_REGION_DRAM_IMR:
/* Undefined yet */
default:
@@ -1701,7 +1807,6 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
for (i = 0; i < num; i++) {
u32 reg_id = le32_to_cpu(trigger->data[i]);
- enum iwl_fw_ini_region_type type;
struct iwl_fw_ini_region_cfg *reg;
struct iwl_dump_ini_mem_ops ops;
@@ -1713,15 +1818,19 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
if (!reg)
continue;
- type = le32_to_cpu(reg->region_type);
- switch (type) {
+		/* currently the driver only supports the always-on domain */
+ if (le32_to_cpu(reg->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
+ continue;
+
+ switch (le32_to_cpu(reg->region_type)) {
case IWL_FW_INI_REGION_DEVICE_MEMORY:
- case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ case IWL_FW_INI_REGION_LMAC_ERROR_TABLE:
+ case IWL_FW_INI_REGION_UMAC_ERROR_TABLE:
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_dev_mem_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_PERIPHERY_MAC:
case IWL_FW_INI_REGION_PERIPHERY_PHY:
@@ -1730,16 +1839,23 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_prph_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_DRAM_BUFFER:
ops.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges;
ops.get_size = iwl_dump_ini_mon_dram_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header;
ops.fill_range = iwl_dump_ini_mon_dram_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
+ break;
+ case IWL_FW_INI_REGION_INTERNAL_BUFFER:
+ ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
+ ops.get_size = iwl_dump_ini_mon_smem_get_size;
+ ops.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header;
+ ops.fill_range = iwl_dump_ini_dev_mem_iter;
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
- case IWL_FW_INI_REGION_PAGING: {
+ case IWL_FW_INI_REGION_PAGING:
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
if (iwl_fw_dbg_is_paging_enabled(fwrt)) {
ops.get_num_of_ranges =
@@ -1754,9 +1870,8 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.fill_range = iwl_dump_ini_paging_gen2_iter;
}
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
- }
case IWL_FW_INI_REGION_TXF: {
struct iwl_ini_txf_iter_data iter = { .init = true };
void *fifo_iter = fwrt->dump.fifo_iter;
@@ -1766,7 +1881,7 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.get_size = iwl_dump_ini_txf_get_size;
ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
ops.fill_range = iwl_dump_ini_txf_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
fwrt->dump.fifo_iter = fifo_iter;
break;
}
@@ -1775,14 +1890,14 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
ops.get_size = iwl_dump_ini_rxf_get_size;
ops.fill_mem_hdr = iwl_dump_ini_fifo_fill_header;
ops.fill_range = iwl_dump_ini_rxf_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_CSR:
ops.get_num_of_ranges = iwl_dump_ini_mem_ranges;
ops.get_size = iwl_dump_ini_mem_get_size;
ops.fill_mem_hdr = iwl_dump_ini_mem_fill_header;
ops.fill_range = iwl_dump_ini_csr_iter;
- iwl_dump_ini_mem(fwrt, type, data, reg, &ops);
+ iwl_dump_ini_mem(fwrt, data, reg, &ops);
break;
case IWL_FW_INI_REGION_DRAM_IMR:
/* This is undefined yet */
@@ -1793,16 +1908,13 @@ static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt,
}
static struct iwl_fw_error_dump_file *
-_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
- struct iwl_fw_dump_ptrs *fw_error_dump)
+iwl_fw_error_ini_dump_file(struct iwl_fw_runtime *fwrt)
{
- int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type);
+ int size;
struct iwl_fw_error_dump_data *dump_data;
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_ini_trigger *trigger;
-
- if (id == FW_DBG_TRIGGER_FW_ASSERT)
- id = IWL_FW_TRIGGER_ID_FW_ASSERT;
+ enum iwl_fw_ini_trigger_id id = fwrt->dump.ini_trig_id;
if (!iwl_fw_ini_trigger_on(fwrt, id))
return NULL;
@@ -1819,8 +1931,6 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
if (!dump_file)
return NULL;
- fw_error_dump->fwrt_ptr = dump_file;
-
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_data = (void *)dump_file->data;
dump_file->file_len = cpu_to_le32(size);
@@ -1830,47 +1940,27 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
return dump_file;
}
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
{
- struct iwl_fw_dump_ptrs *fw_error_dump;
+ struct iwl_fw_dump_ptrs fw_error_dump = {};
struct iwl_fw_error_dump_file *dump_file;
struct scatterlist *sg_dump_data;
u32 file_len;
u32 dump_mask = fwrt->fw->dbg.dump_mask;
- IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
-
- /* there's no point in fw dump if the bus is dead */
- if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
- IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
- goto out;
- }
-
- fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
- if (!fw_error_dump)
- goto out;
-
- if (fwrt->trans->ini_valid)
- dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump);
- else
- dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump);
-
- if (!dump_file) {
- kfree(fw_error_dump);
+ dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump);
+ if (!dump_file)
goto out;
- }
if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only)
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
- if (!fwrt->trans->ini_valid)
- fw_error_dump->trans_ptr =
- iwl_trans_dump_data(fwrt->trans, dump_mask);
-
+ fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
file_len = le32_to_cpu(dump_file->file_len);
- fw_error_dump->fwrt_len = file_len;
- if (fw_error_dump->trans_ptr) {
- file_len += fw_error_dump->trans_ptr->len;
+ fw_error_dump.fwrt_len = file_len;
+
+ if (fw_error_dump.trans_ptr) {
+ file_len += fw_error_dump.trans_ptr->len;
dump_file->file_len = cpu_to_le32(file_len);
}
@@ -1878,27 +1968,49 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
if (sg_dump_data) {
sg_pcopy_from_buffer(sg_dump_data,
sg_nents(sg_dump_data),
- fw_error_dump->fwrt_ptr,
- fw_error_dump->fwrt_len, 0);
- if (fw_error_dump->trans_ptr)
+ fw_error_dump.fwrt_ptr,
+ fw_error_dump.fwrt_len, 0);
+ if (fw_error_dump.trans_ptr)
sg_pcopy_from_buffer(sg_dump_data,
sg_nents(sg_dump_data),
- fw_error_dump->trans_ptr->data,
- fw_error_dump->trans_ptr->len,
- fw_error_dump->fwrt_len);
+ fw_error_dump.trans_ptr->data,
+ fw_error_dump.trans_ptr->len,
+ fw_error_dump.fwrt_len);
dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
GFP_KERNEL);
}
- vfree(fw_error_dump->fwrt_ptr);
- vfree(fw_error_dump->trans_ptr);
- kfree(fw_error_dump);
+ vfree(fw_error_dump.fwrt_ptr);
+ vfree(fw_error_dump.trans_ptr);
out:
iwl_fw_free_dump_desc(fwrt);
clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
- IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
}
-IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
+
+static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt)
+{
+ struct iwl_fw_error_dump_file *dump_file;
+ struct scatterlist *sg_dump_data;
+ u32 file_len;
+
+ dump_file = iwl_fw_error_ini_dump_file(fwrt);
+ if (!dump_file)
+ goto out;
+
+ file_len = le32_to_cpu(dump_file->file_len);
+
+ sg_dump_data = alloc_sgtable(file_len);
+ if (sg_dump_data) {
+ sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data),
+ dump_file, file_len, 0);
+ dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
+ GFP_KERNEL);
+ }
+ vfree(dump_file);
+out:
+ fwrt->dump.ini_trig_id = IWL_FW_TRIGGER_ID_INVALID;
+ clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+}
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
.trig_desc = {
@@ -1912,6 +2024,17 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
bool monitor_only,
unsigned int delay)
{
+ u32 trig_type = le32_to_cpu(desc->trig_desc.type);
+ int ret;
+
+ if (fwrt->trans->ini_valid) {
+ ret = iwl_fw_dbg_ini_collect(fwrt, trig_type);
+ if (!ret)
+ iwl_fw_free_dump_desc(fwrt);
+
+ return ret;
+ }
+
if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
return -EBUSY;
@@ -1953,10 +2076,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
-int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len,
- struct iwl_fw_dbg_trigger_tlv *trigger)
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_dbg_trigger trig,
+ const char *str, size_t len,
+ struct iwl_fw_dbg_trigger_tlv *trigger)
{
struct iwl_fw_dump_desc *desc;
unsigned int delay = 0;
@@ -1993,50 +2116,73 @@ int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
}
-IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect);
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
-int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- u32 id, const char *str, size_t len)
+int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_trigger_id id)
{
- struct iwl_fw_dump_desc *desc;
struct iwl_fw_ini_active_triggers *active;
u32 occur, delay;
- if (!fwrt->trans->ini_valid)
- return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL);
-
- if (id == FW_DBG_TRIGGER_USER)
- id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+ if (WARN_ON(!iwl_fw_ini_trigger_on(fwrt, id)))
+ return -EINVAL;
- active = &fwrt->dump.active_trigs[id];
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ return -EBUSY;
- if (WARN_ON(!active->active))
+ if (!iwl_fw_ini_trigger_on(fwrt, id)) {
+ IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
+ id);
return -EINVAL;
+ }
+ active = &fwrt->dump.active_trigs[id];
delay = le32_to_cpu(active->trig->dump_delay);
occur = le32_to_cpu(active->trig->occurrences);
if (!occur)
return 0;
+ active->trig->occurrences = cpu_to_le32(--occur);
+
if (le32_to_cpu(active->trig->force_restart)) {
- IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id);
+ IWL_WARN(fwrt, "WRT: force restart: trigger %d fired.\n", id);
iwl_force_nmi(fwrt->trans);
return 0;
}
- desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
- if (!desc)
- return -ENOMEM;
+ if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+ return -EBUSY;
- active->trig->occurrences = cpu_to_le32(--occur);
+ fwrt->dump.ini_trig_id = id;
- desc->len = len;
- desc->trig_desc.type = cpu_to_le32(id);
- memcpy(desc->trig_desc.data, str, len);
+ IWL_WARN(fwrt, "WRT: collecting data: ini trigger %d fired.\n", id);
+
+ schedule_delayed_work(&fwrt->dump.wk, usecs_to_jiffies(delay));
- return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay);
+ return 0;
}
-IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
+IWL_EXPORT_SYMBOL(_iwl_fw_dbg_ini_collect);
+
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id)
+{
+ int id;
+
+ switch (legacy_trigger_id) {
+ case FW_DBG_TRIGGER_FW_ASSERT:
+ case FW_DBG_TRIGGER_ALIVE_TIMEOUT:
+ case FW_DBG_TRIGGER_DRIVER:
+ id = IWL_FW_TRIGGER_ID_FW_ASSERT;
+ break;
+ case FW_DBG_TRIGGER_USER:
+ id = IWL_FW_TRIGGER_ID_USER_TRIGGER;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return _iwl_fw_dbg_ini_collect(fwrt, id);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_ini_collect);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
@@ -2064,8 +2210,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
len = strlen(buf) + 1;
}
- ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
- trigger);
+ ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+ trigger);
if (ret)
return ret;
@@ -2139,9 +2285,20 @@ void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt)
return;
}
+ /* there's no point in fw dump if the bus is dead */
+ if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+ IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+ return;
+ }
+
iwl_fw_dbg_stop_recording(fwrt, &params);
- iwl_fw_error_dump(fwrt);
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection start\n");
+ if (fwrt->trans->ini_valid)
+ iwl_fw_error_ini_dump(fwrt);
+ else
+ iwl_fw_error_dump(fwrt);
+ IWL_DEBUG_FW_INFO(fwrt, "WRT: data collection done\n");
/* start recording again if the firmware is not crashed */
if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
@@ -2211,12 +2368,14 @@ iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, u32 size)
if (!virtual_addr)
IWL_ERR(fwrt, "Failed to allocate debug memory\n");
+ IWL_DEBUG_FW(trans,
+ "Allocated DRAM buffer[%d], size=0x%x\n",
+ trans->num_blocks, size);
+
trans->fw_mon[trans->num_blocks].block = virtual_addr;
trans->fw_mon[trans->num_blocks].physical = phys_addr;
trans->fw_mon[trans->num_blocks].size = size;
trans->num_blocks++;
-
- IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
}
static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
@@ -2239,11 +2398,15 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH) {
if (!WARN(pnt != IWL_FW_INI_APPLY_EARLY,
- "Invalid apply point %d for SMEM buffer allocation",
- pnt))
+ "WRT: Invalid apply point %d for SMEM buffer allocation, aborting\n",
+ pnt)) {
+ IWL_DEBUG_FW(trans,
+ "WRT: applying SMEM buffer destination\n");
+
/* set sram monitor by enabling bit 7 */
iwl_set_bit(fwrt->trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
+ }
return;
}
@@ -2262,6 +2425,9 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
if (trans->num_blocks == 1)
return;
+ IWL_DEBUG_FW(trans,
+ "WRT: applying DRAM buffer[%d] destination\n", block_idx);
+
cmd->num_frags = cpu_to_le32(1);
cmd->fragments[0].address =
cpu_to_le64(trans->fw_mon[block_idx].physical);
@@ -2273,7 +2439,8 @@ static void iwl_fw_dbg_buffer_apply(struct iwl_fw_runtime *fwrt,
}
static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
- struct iwl_ucode_tlv *tlv)
+ struct iwl_ucode_tlv *tlv,
+ bool ext)
{
struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
@@ -2285,6 +2452,14 @@ static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
.data = { data->data, },
};
+	/* currently the driver only supports the always-on domain */
+ if (le32_to_cpu(hcmd_tlv->domain) != IWL_FW_INI_DBG_DOMAIN_ALWAYS_ON)
+ return;
+
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Sending host command id=0x%x, group=0x%x\n",
+ ext, data->id, data->group);
+
iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
@@ -2294,24 +2469,32 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
{
void *iter = (void *)tlv->region_config;
int i, size = le32_to_cpu(tlv->num_regions);
+ const char *err_st =
+ "WRT: ext=%d. Invalid region %s %d for apply point %d\n";
for (i = 0; i < size; i++) {
struct iwl_fw_ini_region_cfg *reg = iter, **active;
int id = le32_to_cpu(reg->region_id);
u32 type = le32_to_cpu(reg->region_type);
- if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs),
- "Invalid region id %d for apply point %d\n", id, pnt))
+ if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), err_st, ext,
+ "id", id, pnt))
+ break;
+
+ if (WARN(type == 0 || type >= IWL_FW_INI_REGION_NUM, err_st,
+ ext, "type", type, pnt))
break;
active = &fwrt->dump.active_regs[id];
if (*active)
- IWL_WARN(fwrt->trans, "region TLV %d override\n", id);
+ IWL_WARN(fwrt->trans,
+ "WRT: ext=%d. Region id %d override\n",
+ ext, id);
IWL_DEBUG_FW(fwrt,
- "%s: apply point %d, activating region ID %d\n",
- __func__, pnt, id);
+ "WRT: ext=%d. Activating region id %d\n",
+ ext, id);
*active = reg;
@@ -2319,7 +2502,15 @@ static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
type == IWL_FW_INI_REGION_RXF)
iter += le32_to_cpu(reg->fifos.num_of_registers) *
sizeof(__le32);
- else if (type != IWL_FW_INI_REGION_DRAM_BUFFER)
+ else if (type == IWL_FW_INI_REGION_DEVICE_MEMORY ||
+ type == IWL_FW_INI_REGION_PERIPHERY_MAC ||
+ type == IWL_FW_INI_REGION_PERIPHERY_PHY ||
+ type == IWL_FW_INI_REGION_PERIPHERY_AUX ||
+ type == IWL_FW_INI_REGION_INTERNAL_BUFFER ||
+ type == IWL_FW_INI_REGION_PAGING ||
+ type == IWL_FW_INI_REGION_CSR ||
+ type == IWL_FW_INI_REGION_LMAC_ERROR_TABLE ||
+ type == IWL_FW_INI_REGION_UMAC_ERROR_TABLE)
iter += le32_to_cpu(reg->internal.num_of_ranges) *
sizeof(__le32);
@@ -2338,7 +2529,8 @@ static int iwl_fw_dbg_trig_realloc(struct iwl_fw_runtime *fwrt,
ptr = krealloc(active->trig, size, GFP_KERNEL);
if (!ptr) {
- IWL_ERR(fwrt, "Failed to allocate memory for trigger %d\n", id);
+ IWL_ERR(fwrt, "WRT: Failed to allocate memory for trigger %d\n",
+ id);
return -ENOMEM;
}
active->trig = ptr;
@@ -2362,7 +2554,9 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
u32 trig_regs_size = le32_to_cpu(trig->num_regions) *
sizeof(__le32);
- if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
+ if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_trigs),
+ "WRT: ext=%d. Invalid trigger id %d for apply point %d\n",
+ ext, id, apply_point))
break;
active = &fwrt->dump.active_trigs[id];
@@ -2370,6 +2564,10 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
if (!active->active) {
size_t trig_size = sizeof(*trig) + trig_regs_size;
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Activating trigger %d\n",
+ ext, id);
+
if (iwl_fw_dbg_trig_realloc(fwrt, active, id,
trig_size))
goto next;
@@ -2388,8 +2586,16 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
int mem_to_add = trig_regs_size;
if (region_override) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Trigger %d regions override\n",
+ ext, id);
+
mem_to_add -= active_regs * sizeof(__le32);
} else {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Trigger %d regions appending\n",
+ ext, id);
+
offset += active_regs;
new_regs += active_regs;
}
@@ -2398,8 +2604,13 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
active->size + mem_to_add))
goto next;
- if (conf_override)
+ if (conf_override) {
+ IWL_DEBUG_FW(fwrt,
+ "WRT: ext=%d. Trigger %d configuration override\n",
+ ext, id);
+
memcpy(active->trig, trig, sizeof(*trig));
+ }
memcpy(active->trig->data + offset, trig->data,
trig_regs_size);
@@ -2411,6 +2622,20 @@ static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
active->trig->occurrences = cpu_to_le32(-1);
active->active = true;
+
+ if (id == IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER) {
+ u32 collect_interval = le32_to_cpu(trig->trigger_data);
+
+ /* the minimum allowed interval is 50ms */
+ if (collect_interval < 50) {
+ collect_interval = 50;
+ trig->trigger_data =
+ cpu_to_le32(collect_interval);
+ }
+
+ mod_timer(&fwrt->dump.periodic_trig,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
next:
iter += sizeof(*trig) + trig_regs_size;
@@ -2440,11 +2665,11 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
IWL_ERR(fwrt,
- "Invalid apply point %x for host command\n",
- pnt);
+ "WRT: ext=%d. Invalid apply point %d for host command\n",
+ ext, pnt);
goto next;
}
- iwl_fw_dbg_send_hcmd(fwrt, tlv);
+ iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
break;
case IWL_UCODE_TLV_TYPE_REGIONS:
iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
@@ -2455,7 +2680,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
break;
default:
- WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
+ WARN_ONCE(1,
+ "WRT: ext=%d. Invalid TLV 0x%x for apply point\n",
+ ext, type);
break;
}
next:
@@ -2469,6 +2696,8 @@ void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
void *data = &fwrt->trans->apply_points[apply_point];
int i;
+ IWL_DEBUG_FW(fwrt, "WRT: enabling apply point %d\n", apply_point);
+
if (apply_point == IWL_FW_INI_APPLY_EARLY) {
for (i = 0; i < IWL_FW_INI_MAX_REGION_ID; i++)
fwrt->dump.active_regs[i] = NULL;
@@ -2487,8 +2716,34 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
{
+ del_timer(&fwrt->dump.periodic_trig);
iwl_fw_dbg_collect_sync(fwrt);
iwl_trans_stop_device(fwrt->trans);
}
IWL_EXPORT_SYMBOL(iwl_fwrt_stop_device);
+
+void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t)
+{
+ struct iwl_fw_runtime *fwrt;
+ enum iwl_fw_ini_trigger_id id = IWL_FW_TRIGGER_ID_PERIODIC_TRIGGER;
+ int ret;
+ typeof(fwrt->dump) *dump_ptr = container_of(t, typeof(fwrt->dump),
+ periodic_trig);
+
+ fwrt = container_of(dump_ptr, typeof(*fwrt), dump);
+
+ ret = _iwl_fw_dbg_ini_collect(fwrt, id);
+ if (!ret || ret == -EBUSY) {
+ struct iwl_fw_ini_trigger *trig =
+ fwrt->dump.active_trigs[id].trig;
+ u32 occur = le32_to_cpu(trig->occurrences);
+ u32 collect_interval = le32_to_cpu(trig->trigger_data);
+
+ if (!occur)
+ return;
+
+ mod_timer(&fwrt->dump.periodic_trig,
+ jiffies + msecs_to_jiffies(collect_interval));
+ }
+}
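
Editor's note: the handler above is written for the timer_list API, so the periodic trigger is presumably armed once during fw-runtime setup; a sketch of what that registration would look like (an assumption, the call site is not shown in this hunk):

	/* somewhere in the fw runtime init path (illustrative) */
	timer_setup(&fwrt->dump.periodic_trig,
		    iwl_fw_dbg_periodic_trig_handler, 0);
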
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index a199056234d3..2a9e560a906b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -108,18 +108,17 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
fwrt->dump.umac_err_id = 0;
}
-void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig_type);
-int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- enum iwl_fw_dbg_trigger trig,
- const char *str, size_t len,
- struct iwl_fw_dbg_trigger_tlv *trigger);
+int _iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_trigger_id id);
+int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt, u32 legacy_trigger_id);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
- u32 id, const char *str, size_t len);
+ enum iwl_fw_dbg_trigger trig, const char *str,
+ size_t len, struct iwl_fw_dbg_trigger_tlv *trigger);
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_trigger_tlv *trigger,
const char *fmt, ...) __printf(3, 4);
@@ -229,10 +228,8 @@ iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger *trig;
u32 usec;
-
-
- if (!fwrt->trans->ini_valid || id >= IWL_FW_TRIGGER_ID_NUM ||
- !fwrt->dump.active_trigs[id].active)
+ if (!fwrt->trans->ini_valid || id == IWL_FW_TRIGGER_ID_INVALID ||
+ id >= IWL_FW_TRIGGER_ID_NUM || !fwrt->dump.active_trigs[id].active)
return false;
trig = fwrt->dump.active_trigs[id].trig;
@@ -388,11 +385,13 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
{
+ del_timer(&fwrt->dump.periodic_trig);
flush_delayed_work(&fwrt->dump.wk);
}
static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
{
+ del_timer(&fwrt->dump.periodic_trig);
cancel_delayed_work_sync(&fwrt->dump.wk);
}
@@ -461,4 +460,15 @@ static inline void iwl_fw_umac_set_alive_err_table(struct iwl_trans *trans,
/* This bit is used to differentiate the legacy dump from the ini dump */
#define INI_DUMP_BIT BIT(31)
+static inline void iwl_fw_error_collect(struct iwl_fw_runtime *fwrt)
+{
+ if (fwrt->trans->ini_valid && fwrt->trans->hw_error) {
+ _iwl_fw_dbg_ini_collect(fwrt, IWL_FW_TRIGGER_ID_FW_HW_ERROR);
+ fwrt->trans->hw_error = false;
+ } else {
+ iwl_fw_dbg_collect_desc(fwrt, &iwl_dump_desc_assert, false, 0);
+ }
+}
+
+void iwl_fw_dbg_periodic_trig_handler(struct timer_list *t);
#endif /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 9b5077bd46c3..0feff4c33e39 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -184,7 +184,7 @@ enum iwl_fw_error_dump_family {
/**
* struct iwl_fw_error_dump_info - info on the device / firmware
- * @device_family: the family of the device (7 / 8)
+ * @hw_type: the type of the device
* @hw_step: the step of the device
* @fw_human_readable: human readable FW version
* @dev_human_readable: name of the device
@@ -196,7 +196,7 @@ enum iwl_fw_error_dump_family {
* if the dump collection was not initiated by an assert, the value is 0
*/
struct iwl_fw_error_dump_info {
- __le32 device_family;
+ __le32 hw_type;
__le32 hw_step;
u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
u8 dev_human_readable[64];
@@ -211,6 +211,9 @@ struct iwl_fw_error_dump_info {
* @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
* @fw_mon_base_ptr: base pointer of the data
* @fw_mon_cycle_cnt: number of wraparounds
+ * @fw_mon_base_high_ptr: used in AX210 devices, the base address is 64 bit
+ * so fw_mon_base_ptr holds the LSB 32 bits and fw_mon_base_high_ptr holds
+ * the MSB 32 bits
* @reserved: for future use
* @data: captured data
*/
@@ -218,7 +221,8 @@ struct iwl_fw_error_dump_fw_mon {
__le32 fw_mon_wr_ptr;
__le32 fw_mon_base_ptr;
__le32 fw_mon_cycle_cnt;
- __le32 reserved[3];
+ __le32 fw_mon_base_high_ptr;
+ __le32 reserved[2];
u8 data[];
} __packed;
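Since fw_mon_base_ptr now carries only the low 32 bits and fw_mon_base_high_ptr the high 32 bits on AX210, a dump consumer has to stitch the two halves back together. A minimal standalone illustration (hypothetical helper name, little-endian conversion of the dump fields omitted for brevity):

#include <stdint.h>

/* Rebuild the 64-bit monitor base from the two 32-bit dump fields. */
static uint64_t fw_mon_base(uint32_t base_lsb, uint32_t base_msb)
{
	return ((uint64_t)base_msb << 32) | base_lsb;
}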
@@ -274,25 +278,33 @@ struct iwl_fw_error_dump_mem {
u8 data[];
};
+#define IWL_INI_DUMP_MEM_VER 1
+#define IWL_INI_DUMP_MONITOR_VER 1
+#define IWL_INI_DUMP_FIFO_VER 1
+
/**
* struct iwl_fw_ini_error_dump_range - range of memory
- * @start_addr: the start address of this range
* @range_data_size: the size of this range, in bytes
+ * @start_addr: the start address of this range
* @data: the actual memory
*/
struct iwl_fw_ini_error_dump_range {
- __le32 start_addr;
__le32 range_data_size;
+ __le64 start_addr;
__le32 data[];
} __packed;
/**
* struct iwl_fw_ini_error_dump_header - ini region dump header
+ * @version: dump version
+ * @region_id: id of the region
* @num_of_ranges: number of ranges in this region
* @name_len: number of bytes allocated to the name string of this region
* @name: name of the region
*/
struct iwl_fw_ini_error_dump_header {
+ __le32 version;
+ __le32 region_id;
__le32 num_of_ranges;
__le32 name_len;
u8 name[IWL_FW_INI_MAX_NAME];
@@ -312,12 +324,23 @@ struct iwl_fw_ini_error_dump {
#define IWL_RXF_UMAC_BIT BIT(31)
/**
+ * struct iwl_fw_ini_error_dump_register - ini register dump
+ * @addr: address of the register
+ * @data: data of the register
+ */
+struct iwl_fw_ini_error_dump_register {
+ __le32 addr;
+ __le32 data;
+} __packed;
+
+/**
* struct iwl_fw_ini_fifo_error_dump_range - ini fifo range dump
* @fifo_num: the fifo num. In case of rxf and umac rxf, set BIT(31) to
* distinguish between lmac and umac
* @num_of_registers: num of registers to dump, dword size each
- * @range_data_size: the size of the registers and fifo data
- * @data: fifo data
+ * @range_data_size: the size of the data
+ * @data: consists of
+ * num_of_registers * (register address + register value) + fifo data
*/
struct iwl_fw_ini_fifo_error_dump_range {
__le32 fifo_num;
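The @data layout documented above is a run of num_of_registers (address, value) pairs followed by the raw FIFO contents. A sketch of a parser for that layout, with standalone C types and a hypothetical walk_fifo_range() helper; the real dump fields are little-endian __le32, and endian conversion is omitted here:

#include <stdint.h>
#include <stdio.h>

struct reg_pair { uint32_t addr; uint32_t val; };

static const uint8_t *walk_fifo_range(const uint8_t *data,
				      uint32_t num_of_registers,
				      uint32_t range_data_size)
{
	const struct reg_pair *regs = (const struct reg_pair *)data;
	uint32_t i, regs_size = num_of_registers * sizeof(*regs);

	for (i = 0; i < num_of_registers; i++)
		printf("reg 0x%08x = 0x%08x\n",
		       (unsigned)regs[i].addr, (unsigned)regs[i].val);

	printf("fifo payload: %u bytes\n",
	       (unsigned)(range_data_size - regs_size));

	return data + regs_size;	/* start of the raw FIFO data */
}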
@@ -351,13 +374,13 @@ struct iwl_fw_error_dump_rb {
};
/**
- * struct iwl_fw_ini_monitor_dram_dump - ini dram monitor dump
+ * struct iwl_fw_ini_monitor_dump - ini monitor dump
* @header - header of the region
- * @write_ptr - write pointer position in the dram
+ * @write_ptr - write pointer position in the buffer
* @cycle_cnt - cycles count
 * @ranges - the memory ranges of this region
*/
-struct iwl_fw_ini_monitor_dram_dump {
+struct iwl_fw_ini_monitor_dump {
struct iwl_fw_ini_error_dump_header header;
__le32 write_ptr;
__le32 cycle_cnt;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index e06407dc088b..de9243d30135 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -142,12 +142,14 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_DBG_DEST = 38,
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
+ IWL_UCODE_TLV_CMD_VERSIONS = 48,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
+ IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP + 0x1,
IWL_UCODE_TLV_DEBUG_BASE = IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION,
@@ -275,8 +277,15 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* version of the beacon notification.
* @IWL_UCODE_TLV_API_BEACON_FILTER_V4: This ucode supports v4 of
* BEACON_FILTER_CONFIG_API_S_VER_4.
+ * @IWL_UCODE_TLV_API_REGULATORY_NVM_INFO: This ucode supports v4 of
+ * REGULATORY_NVM_GET_INFO_RSP_API_S.
* @IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ: This ucode supports v7 of
* LOCATION_RANGE_REQ_CMD_API_S and v6 of LOCATION_RANGE_RESP_NTFY_API_S.
+ * @IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS: This ucode supports v2 of
+ * SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S and v3 of
+ * SCAN_OFFLOAD_PROFILES_QUERY_RSP_S.
+ * @IWL_UCODE_TLV_API_MBSSID_HE: This ucode supports v2 of
+ * STA_CONTEXT_DOT11AX_API_S
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@@ -303,7 +312,12 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_REDUCE_TX_POWER = (__force iwl_ucode_tlv_api_t)45,
IWL_UCODE_TLV_API_SHORT_BEACON_NOTIF = (__force iwl_ucode_tlv_api_t)46,
IWL_UCODE_TLV_API_BEACON_FILTER_V4 = (__force iwl_ucode_tlv_api_t)47,
+ IWL_UCODE_TLV_API_REGULATORY_NVM_INFO = (__force iwl_ucode_tlv_api_t)48,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ = (__force iwl_ucode_tlv_api_t)49,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS = (__force iwl_ucode_tlv_api_t)50,
+ IWL_UCODE_TLV_API_MBSSID_HE = (__force iwl_ucode_tlv_api_t)52,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE = (__force iwl_ucode_tlv_api_t)53,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY = (__force iwl_ucode_tlv_api_t)54,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@@ -353,6 +367,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD: firmware supports CSA command
* @IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
* (6 GHz).
+ * @IWL_UCODE_TLV_CAPA_CS_MODIFY: firmware supports modify action CSA command
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -423,6 +438,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD = (__force iwl_ucode_tlv_capa_t)46,
IWL_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS = (__force iwl_ucode_tlv_capa_t)48,
IWL_UCODE_TLV_CAPA_FTM_CALIBRATED = (__force iwl_ucode_tlv_capa_t)47,
+ IWL_UCODE_TLV_CAPA_CS_MODIFY = (__force iwl_ucode_tlv_capa_t)49,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -928,4 +944,20 @@ struct iwl_fw_dbg_conf_tlv {
struct iwl_fw_dbg_conf_hcmd hcmd;
} __packed;
+#define IWL_FW_CMD_VER_UNKNOWN 99
+
+/**
+ * struct iwl_fw_cmd_version - firmware command version entry
+ * @cmd: command ID
+ * @group: group ID
+ * @cmd_ver: command version
+ * @notif_ver: notification version
+ */
+struct iwl_fw_cmd_version {
+ u8 cmd;
+ u8 group;
+ u8 cmd_ver;
+ u8 notif_ver;
+} __packed;
+
#endif /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index f4c5a4d73206..18ca5f152be6 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -8,7 +8,7 @@
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -109,6 +109,9 @@ struct iwl_ucode_capabilities {
u32 error_log_size;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
+
+ const struct iwl_fw_cmd_version *cmd_versions;
+ u32 n_cmd_versions;
};
static inline bool
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 12310e3d2fc5..4435c0ce3013 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -76,6 +76,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
+ timer_setup(&fwrt->dump.periodic_trig,
+ iwl_fw_dbg_periodic_trig_handler, 0);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index a5fe1a8ca426..a6402a0b3854 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -145,6 +145,8 @@ struct iwl_fw_runtime {
u32 lmac_err_id[MAX_NUM_LMAC];
u32 umac_err_id;
void *fifo_iter;
+ enum iwl_fw_ini_trigger_id ini_trig_id;
+ struct timer_list periodic_trig;
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 93070848280a..f3e69edf8907 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -383,6 +383,9 @@ struct iwl_csr_params {
* @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @min_txq_size: minimum number of slots required in a TX queue
* @umac_prph_offset: offset to add to UMAC periphery address
+ * @uhb_supported: ultra high band channels supported
+ * @min_256_ba_txq_size: minimum number of slots required in a TX queue which
+ * supports 256 BA aggregation
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -433,7 +436,8 @@ struct iwl_cfg {
gen2:1,
cdb:1,
dbgc_supported:1,
- bisr_workaround:1;
+ bisr_workaround:1,
+ uhb_supported:1;
u8 valid_tx_ant;
u8 valid_rx_ant;
u8 non_shared_ant;
@@ -450,6 +454,12 @@ struct iwl_cfg {
u32 d3_debug_data_length;
u32 min_txq_size;
u32 umac_prph_offset;
+ u32 fw_mon_smem_write_ptr_addr;
+ u32 fw_mon_smem_write_ptr_msk;
+ u32 fw_mon_smem_cycle_cnt_ptr_addr;
+ u32 fw_mon_smem_cycle_cnt_ptr_msk;
+ u32 gp2_reg_addr;
+ u32 min_256_ba_txq_size;
};
extern const struct iwl_csr_params iwl_csr_v1;
@@ -568,11 +578,11 @@ extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl9560_2ac_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
-extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_jf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf_a0;
extern const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0;
+extern const struct iwl_cfg iwlax210_2ax_cfg_so_gf4_a0;
#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index e539bc94eff7..553554846009 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -8,7 +8,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -290,6 +290,7 @@
/* HW REV */
#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+#define CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
/* HW RFID */
#define CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
@@ -337,6 +338,7 @@ enum {
#define CSR_HW_RF_ID_TYPE_HR (0x0010A000)
#define CSR_HW_RF_ID_TYPE_HRCDB (0x00109F00)
#define CSR_HW_RF_ID_TYPE_GF (0x0010D000)
+#define CSR_HW_RF_ID_TYPE_GF4 (0x0010E000)
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index c7070760a10a..ba66f7fba064 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,7 +28,7 @@
*
* BSD LICENSE
*
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,6 +73,9 @@ void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);
int offset_size = copy_size;
+ if (le32_to_cpu(header->tlv_version) != 1)
+ return;
+
if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM,
"Invalid apply point id %d\n", apply_point))
return;
@@ -133,7 +136,10 @@ void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
hdr = (void *)&tlv->data[0];
apply = le32_to_cpu(hdr->apply_point);
- IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
+ if (le32_to_cpu(hdr->tlv_version) != 1)
+ continue;
+
+ IWL_DEBUG_FW(trans, "WRT: read TLV 0x%x, apply point %d\n",
le32_to_cpu(tlv->type), apply);
if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 655ff5694560..d3ba6a1422ee 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -218,5 +218,7 @@ do { \
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
+#define IWL_DEBUG_FW_INFO(p, f, a...) \
+ IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 689a65b11cc3..852d3cbfc719 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -179,6 +179,7 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg.trigger_tlv[i]);
kfree(drv->fw.dbg.mem_tlv);
kfree(drv->fw.iml);
+ kfree(drv->fw.ucode_capa.cmd_versions);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -252,8 +253,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
cfg->fw_name_pre, tag);
- IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
- drv->firmware_name);
+ IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
+ drv->firmware_name);
return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
drv->trans->dev,
@@ -1144,6 +1145,23 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (iwlwifi_mod_params.enable_ini)
iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
break;
+ case IWL_UCODE_TLV_CMD_VERSIONS:
+ if (tlv_len % sizeof(struct iwl_fw_cmd_version)) {
+ IWL_ERR(drv,
+ "Invalid length for command versions: %u\n",
+ tlv_len);
+ tlv_len /= sizeof(struct iwl_fw_cmd_version);
+ tlv_len *= sizeof(struct iwl_fw_cmd_version);
+ }
+ if (WARN_ON(capa->cmd_versions))
+ return -EINVAL;
+ capa->cmd_versions = kmemdup(tlv_data, tlv_len,
+ GFP_KERNEL);
+ if (!capa->cmd_versions)
+ return -ENOMEM;
+ capa->n_cmd_versions =
+ tlv_len / sizeof(struct iwl_fw_cmd_version);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
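The new IWL_UCODE_TLV_CMD_VERSIONS handling above copies an array of iwl_fw_cmd_version entries into capa->cmd_versions and records the count. A sketch of how such a table might later be queried, using standalone types and a hypothetical lookup_cmd_ver() helper that falls back to the same "unknown" sentinel value as the IWL_FW_CMD_VER_UNKNOWN define added in file.h:

#include <stdint.h>
#include <stddef.h>

#define FW_CMD_VER_UNKNOWN 99

struct fw_cmd_version {
	uint8_t cmd;
	uint8_t group;
	uint8_t cmd_ver;
	uint8_t notif_ver;
};

static uint8_t lookup_cmd_ver(const struct fw_cmd_version *vers, size_t n,
			      uint8_t group, uint8_t cmd)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (vers[i].group == group && vers[i].cmd == cmd)
			return vers[i].cmd_ver;

	return FW_CMD_VER_UNKNOWN;
}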
@@ -1318,8 +1336,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (!ucode_raw)
goto try_again;
- IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
- drv->firmware_name, ucode_raw->size);
+ IWL_DEBUG_FW_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
+ drv->firmware_name, ucode_raw->size);
/* Make sure that we got at least the API version number */
if (ucode_raw->size < 4) {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 87d6de7efdd2..d87a6bb3e456 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -130,7 +130,7 @@ enum nvm_sku_bits {
/*
* These are the channel numbers in the order that they are stored in the NVM
*/
-static const u8 iwl_nvm_channels[] = {
+static const u16 iwl_nvm_channels[] = {
/* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
@@ -139,7 +139,7 @@ static const u8 iwl_nvm_channels[] = {
149, 153, 157, 161, 165
};
-static const u8 iwl_ext_nvm_channels[] = {
+static const u16 iwl_ext_nvm_channels[] = {
/* 2.4 GHz */
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
/* 5 GHz */
@@ -148,14 +148,27 @@ static const u8 iwl_ext_nvm_channels[] = {
149, 153, 157, 161, 165, 169, 173, 177, 181
};
+static const u16 iwl_uhb_nvm_channels[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181,
+ /* 6-7 GHz */
+ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233, 237, 241,
+ 245, 249, 253, 257, 261, 265, 269, 273, 277, 281, 285, 289, 293, 297,
+ 301, 305, 309, 313, 317, 321, 325, 329, 333, 337, 341, 345, 349, 353,
+ 357, 361, 365, 369, 373, 377, 381, 385, 389, 393, 397, 401, 405, 409,
+ 413, 417, 421
+};
+
#define IWL_NVM_NUM_CHANNELS ARRAY_SIZE(iwl_nvm_channels)
#define IWL_NVM_NUM_CHANNELS_EXT ARRAY_SIZE(iwl_ext_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS_UHB ARRAY_SIZE(iwl_uhb_nvm_channels)
#define NUM_2GHZ_CHANNELS 14
-#define NUM_2GHZ_CHANNELS_EXT 14
#define FIRST_2GHZ_HT_MINUS 5
#define LAST_2GHZ_HT_PLUS 9
-#define LAST_5GHZ_HT 165
-#define LAST_5GHZ_HT_FAMILY_8000 181
#define N_HW_ADDR_MASK 0xF
/* rate data (static) */
@@ -213,7 +226,7 @@ enum iwl_nvm_channel_flags {
};
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
- int chan, u16 flags)
+ int chan, u32 flags)
{
#define CHECK_AND_PRINT_I(x) \
((flags & NVM_CHANNEL_##x) ? " " #x : "")
@@ -244,20 +257,16 @@ static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
}
static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
- u16 nvm_flags, const struct iwl_cfg *cfg)
+ u32 nvm_flags, const struct iwl_cfg *cfg)
{
u32 flags = IEEE80211_CHAN_NO_HT40;
- u32 last_5ghz_ht = LAST_5GHZ_HT;
-
- if (cfg->nvm_type == IWL_NVM_EXT)
- last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
if (ch_num <= LAST_2GHZ_HT_PLUS)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
if (ch_num >= FIRST_2GHZ_HT_MINUS)
flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
- } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
else
@@ -292,30 +301,36 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- const __le16 * const nvm_ch_flags,
- u32 sbands_flags)
+ const void * const nvm_ch_flags,
+ u32 sbands_flags, bool v4)
{
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
- u16 ch_flags;
- int num_of_ch, num_2ghz_channels;
- const u8 *nvm_chan;
-
- if (cfg->nvm_type != IWL_NVM_EXT) {
- num_of_ch = IWL_NVM_NUM_CHANNELS;
- nvm_chan = &iwl_nvm_channels[0];
- num_2ghz_channels = NUM_2GHZ_CHANNELS;
- } else {
+ u32 ch_flags;
+ int num_of_ch, num_2ghz_channels = NUM_2GHZ_CHANNELS;
+ const u16 *nvm_chan;
+
+ if (cfg->uhb_supported) {
+ num_of_ch = IWL_NVM_NUM_CHANNELS_UHB;
+ nvm_chan = iwl_uhb_nvm_channels;
+ } else if (cfg->nvm_type == IWL_NVM_EXT) {
num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
- nvm_chan = &iwl_ext_nvm_channels[0];
- num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
+ nvm_chan = iwl_ext_nvm_channels;
+ } else {
+ num_of_ch = IWL_NVM_NUM_CHANNELS;
+ nvm_chan = iwl_nvm_channels;
}
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
bool is_5ghz = (ch_idx >= num_2ghz_channels);
- ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+ if (v4)
+ ch_flags =
+ __le32_to_cpup((__le32 *)nvm_ch_flags + ch_idx);
+ else
+ ch_flags =
+ __le16_to_cpup((__le16 *)nvm_ch_flags + ch_idx);
if (is_5ghz && !data->sku_cap_band_52ghz_enable)
continue;
@@ -636,12 +651,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
u8 tx_chains, u8 rx_chains)
{
- if (sband->band == NL80211_BAND_2GHZ ||
- sband->band == NL80211_BAND_5GHZ)
- sband->iftype_data = iwl_he_capa;
- else
- return;
-
+ sband->iftype_data = iwl_he_capa;
sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa);
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
@@ -661,15 +671,15 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
- const __le16 *nvm_ch_flags, u8 tx_chains,
- u8 rx_chains, u32 sbands_flags)
+ const void *nvm_ch_flags, u8 tx_chains,
+ u8 rx_chains, u32 sbands_flags, bool v4)
{
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
- sbands_flags);
+ sbands_flags, v4);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
@@ -1006,22 +1016,18 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
- sbands_flags);
+ sbands_flags, false);
data->calib_version = 255;
return data;
}
IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
-static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
+static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
- u32 last_5ghz_ht = LAST_5GHZ_HT;
-
- if (cfg->nvm_type == IWL_NVM_EXT)
- last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
if (ch_idx < NUM_2GHZ_CHANNELS &&
(nvm_flags & NVM_CHANNEL_40MHZ)) {
@@ -1029,8 +1035,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
flags &= ~NL80211_RRF_NO_HT40PLUS;
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
flags &= ~NL80211_RRF_NO_HT40MINUS;
- } else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
- (nvm_flags & NVM_CHANNEL_40MHZ)) {
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~NL80211_RRF_NO_HT40PLUS;
else
@@ -1074,18 +1079,26 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int ch_idx;
u16 ch_flags;
u32 reg_rule_flags, prev_reg_rule_flags = 0;
- const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
- iwl_ext_nvm_channels : iwl_nvm_channels;
+ const u16 *nvm_chan;
struct ieee80211_regdomain *regd, *copy_rd;
- int size_of_regd, regd_to_copy;
struct ieee80211_reg_rule *rule;
struct regdb_ptrs *regdb_ptrs;
enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
- int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
- IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
+ int max_num_ch;
+
+ if (cfg->uhb_supported) {
+ max_num_ch = IWL_NVM_NUM_CHANNELS_UHB;
+ nvm_chan = iwl_uhb_nvm_channels;
+ } else if (cfg->nvm_type == IWL_NVM_EXT) {
+ max_num_ch = IWL_NVM_NUM_CHANNELS_EXT;
+ nvm_chan = iwl_ext_nvm_channels;
+ } else {
+ max_num_ch = IWL_NVM_NUM_CHANNELS;
+ nvm_chan = iwl_nvm_channels;
+ }
if (WARN_ON(num_of_ch > max_num_ch))
num_of_ch = max_num_ch;
@@ -1097,11 +1110,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
num_of_ch);
/* build a regdomain rule for every valid channel */
- size_of_regd =
- sizeof(struct ieee80211_regdomain) +
- num_of_ch * sizeof(struct ieee80211_reg_rule);
-
- regd = kzalloc(size_of_regd, GFP_KERNEL);
+ regd = kzalloc(struct_size(regd, reg_rules, num_of_ch), GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
@@ -1177,14 +1186,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
* Narrow down regdom for unused regulatory rules to prevent hole
* between reg rules to wmm rules.
*/
- regd_to_copy = sizeof(struct ieee80211_regdomain) +
- valid_rules * sizeof(struct ieee80211_reg_rule);
-
- copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
- if (!copy_rd) {
+ copy_rd = kmemdup(regd, struct_size(regd, reg_rules, valid_rules),
+ GFP_KERNEL);
+ if (!copy_rd)
copy_rd = ERR_PTR(-ENOMEM);
- goto out;
- }
out:
kfree(regdb_ptrs);
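The two allocations above switch from open-coded "sizeof(header) + n * sizeof(rule)" arithmetic to the kernel's struct_size() helper, which computes the same flexible-array size with overflow checking. A plain-C sketch of the equivalent sizing, with hypothetical types and without the overflow protection struct_size() provides:

#include <stdlib.h>
#include <stddef.h>

struct rule { int start; int end; };

struct regdomain {
	size_t n_rules;
	struct rule rules[];	/* flexible array member */
};

static struct regdomain *alloc_regd(size_t num_of_ch)
{
	struct regdomain *regd;

	regd = calloc(1, sizeof(*regd) + num_of_ch * sizeof(regd->rules[0]));
	if (regd)
		regd->n_rules = num_of_ch;
	return regd;
}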
@@ -1393,7 +1398,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
const struct iwl_fw *fw)
{
struct iwl_nvm_get_info cmd = {};
- struct iwl_nvm_get_info_rsp *rsp;
struct iwl_nvm_data *nvm;
struct iwl_host_cmd hcmd = {
.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
@@ -1408,12 +1412,24 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
bool empty_otp;
u32 mac_flags;
u32 sbands_flags = 0;
+ /*
+ * All the values in iwl_nvm_get_info_rsp v4 are the same as
+ * in v3, except for the channel profile part of the
+ * regulatory data, so everything can be accessed through the new
+ * struct apart from the channel profile itself.
+ */
+ struct iwl_nvm_get_info_rsp *rsp;
+ struct iwl_nvm_get_info_rsp_v3 *rsp_v3;
+ bool v4 = fw_has_api(&fw->ucode_capa,
+ IWL_UCODE_TLV_API_REGULATORY_NVM_INFO);
+ size_t rsp_size = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
+ void *channel_profile;
ret = iwl_trans_send_cmd(trans, &hcmd);
if (ret)
return ERR_PTR(ret);
- if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
+ if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != rsp_size,
"Invalid payload len in NVM response from FW %d",
iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
ret = -EINVAL;
@@ -1475,11 +1491,15 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
}
+ rsp_v3 = (void *)rsp;
+ channel_profile = v4 ? (void *)rsp->regulatory.channel_profile :
+ (void *)rsp_v3->regulatory.channel_profile;
+
iwl_init_sbands(trans->dev, trans->cfg, nvm,
- rsp->regulatory.channel_profile,
+ channel_profile,
nvm->valid_tx_ant & fw->valid_tx_ant,
nvm->valid_rx_ant & fw->valid_rx_ant,
- sbands_flags);
+ sbands_flags, v4);
iwl_free_resp(&hcmd);
return nvm;
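As the comment in iwl_get_nvm() notes, the v3 and v4 responses differ only in the channel-profile part of the regulatory data, so the code selects the expected payload length and the channel_profile pointer based on the REGULATORY_NVM_INFO API bit. A simplified standalone sketch of that pattern; the struct layouts and the N_CH count are placeholders, not the real firmware definitions, and host-order integers stand in for the __le16/__le32 fields:

#include <stdint.h>
#include <stddef.h>

#define N_CH 51	/* placeholder channel count */

struct nvm_rsp_v3 { uint32_t mac_flags; uint16_t channel_profile[N_CH]; };
struct nvm_rsp_v4 { uint32_t mac_flags; uint32_t channel_profile[N_CH]; };

static size_t expected_rsp_len(int v4)
{
	return v4 ? sizeof(struct nvm_rsp_v4) : sizeof(struct nvm_rsp_v3);
}

static const void *channel_profile_ptr(const void *rsp, int v4)
{
	if (v4)
		return ((const struct nvm_rsp_v4 *)rsp)->channel_profile;
	return ((const struct nvm_rsp_v3 *)rsp)->channel_profile;
}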
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 1af9f9e1ecd4..8e6a0c363c0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -368,6 +368,12 @@
#define MON_BUFF_WRPTR_VER2 (0xa03c24)
#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28)
#define MON_BUFF_SHIFT_VER2 (0x8)
+/* FW monitor family AX210 and on */
+#define DBGC_CUR_DBGBUF_BASE_ADDR_LSB (0xd03c20)
+#define DBGC_CUR_DBGBUF_BASE_ADDR_MSB (0xd03c24)
+#define DBGC_CUR_DBGBUF_STATUS (0xd03c1c)
+#define DBGC_DBGBUF_WRAP_AROUND (0xd03c2c)
+#define DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK (0x00ffffff)
#define MON_DMARB_RD_CTL_ADDR (0xa03c60)
#define MON_DMARB_RD_DATA_ADDR (0xa03c5c)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index d8690acee40c..1e4c9ef548cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -274,7 +274,6 @@ struct iwl_rx_cmd_buffer {
bool _page_stolen;
u32 _rx_page_order;
unsigned int truesize;
- u8 status;
};
static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
@@ -768,6 +767,7 @@ struct iwl_self_init_dram {
* @umac_error_event_table: addr of umac error table
* @error_event_table_tlv_status: bitmap that indicates what error table
 * pointers were received via TLV. Use enum &iwl_error_event_table_status
+ * @hw_error: true if a HW error interrupt was received from the FW
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -830,6 +830,7 @@ struct iwl_trans {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
unsigned int error_event_table_tlv_status;
+ bool hw_error;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 808bc6f363d0..60f5d337f16d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -385,10 +385,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
}
}
-static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
- struct cfg80211_wowlan *wowlan)
+static int iwl_mvm_send_patterns_v1(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
{
- struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_wowlan_patterns_cmd_v1 *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
@@ -399,7 +399,7 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return 0;
cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v1);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
@@ -426,6 +426,50 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern_v2);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ pattern_cmd->patterns[i].pattern_type =
+ WOWLAN_PATTERN_TYPE_BITMASK;
+
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].u.bitmask.pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].u.bitmask.mask_size = mask_len;
+ pattern_cmd->patterns[i].u.bitmask.pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_mvm_send_cmd(mvm, &cmd);
+ kfree(pattern_cmd);
+ return err;
+}
+
static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *ap_sta)
{
@@ -851,7 +895,11 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- ret = iwl_mvm_send_patterns(mvm, wowlan);
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE))
+ ret = iwl_mvm_send_patterns(mvm, wowlan);
+ else
+ ret = iwl_mvm_send_patterns_v1(mvm, wowlan);
if (ret)
return ret;
@@ -1728,9 +1776,12 @@ void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
}
+#define ND_QUERY_BUF_LEN (sizeof(struct iwl_scan_offload_profile_match) * \
+ IWL_SCAN_MAX_PROFILES)
+
struct iwl_mvm_nd_query_results {
u32 matched_profiles;
- struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+ u8 matches[ND_QUERY_BUF_LEN];
};
static int
@@ -1743,6 +1794,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
.flags = CMD_WANT_SKB,
};
int ret, len;
+ size_t query_len, matches_len;
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
@@ -1750,8 +1802,19 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
return ret;
}
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ query_len = sizeof(struct iwl_scan_offload_profiles_query);
+ matches_len = sizeof(struct iwl_scan_offload_profile_match) *
+ IWL_SCAN_MAX_PROFILES;
+ } else {
+ query_len = sizeof(struct iwl_scan_offload_profiles_query_v1);
+ matches_len = sizeof(struct iwl_scan_offload_profile_match_v1) *
+ IWL_SCAN_MAX_PROFILES;
+ }
+
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
- if (len < sizeof(*query)) {
+ if (len < query_len) {
IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
ret = -EIO;
goto out_free_resp;
@@ -1760,7 +1823,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
query = (void *)cmd.resp_pkt->data;
results->matched_profiles = le32_to_cpu(query->matched_profiles);
- memcpy(results->matches, query->matches, sizeof(results->matches));
+ memcpy(results->matches, query->matches, matches_len);
#ifdef CONFIG_IWLWIFI_DEBUGFS
mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
@@ -1771,6 +1834,57 @@ out_free_resp:
return ret;
}
+static int iwl_mvm_query_num_match_chans(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *query,
+ int idx)
+{
+ int n_chans = 0, i;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ struct iwl_scan_offload_profile_match *matches =
+ (struct iwl_scan_offload_profile_match *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; i++)
+ n_chans += hweight8(matches[idx].matching_channels[i]);
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+ (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1; i++)
+ n_chans += hweight8(matches[idx].matching_channels[i]);
+ }
+
+ return n_chans;
+}
+
+static void iwl_mvm_query_set_freqs(struct iwl_mvm *mvm,
+ struct iwl_mvm_nd_query_results *query,
+ struct cfg80211_wowlan_nd_match *match,
+ int idx)
+{
+ int i;
+
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_SCAN_OFFLOAD_CHANS)) {
+ struct iwl_scan_offload_profile_match *matches =
+ (struct iwl_scan_offload_profile_match *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ } else {
+ struct iwl_scan_offload_profile_match_v1 *matches =
+ (struct iwl_scan_offload_profile_match_v1 *)query->matches;
+
+ for (i = 0; i < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN_V1 * 8; i++)
+ if (matches[idx].matching_channels[i / 8] & (BIT(i % 8)))
+ match->channels[match->n_channels++] =
+ mvm->nd_channels[i]->center_freq;
+ }
+}
+
static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -1783,7 +1897,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status *fw_status;
unsigned long matched_profiles;
u32 reasons = 0;
- int i, j, n_matches, ret;
+ int i, n_matches, ret;
fw_status = iwl_mvm_get_wakeup_status(mvm);
if (!IS_ERR_OR_NULL(fw_status)) {
@@ -1817,14 +1931,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
goto out_report_nd;
for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
- struct iwl_scan_offload_profile_match *fw_match;
struct cfg80211_wowlan_nd_match *match;
int idx, n_channels = 0;
- fw_match = &query.matches[i];
-
- for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
- n_channels += hweight8(fw_match->matching_channels[j]);
+ n_channels = iwl_mvm_query_num_match_chans(mvm, &query, i);
match = kzalloc(struct_size(match, channels, n_channels),
GFP_KERNEL);
@@ -1844,10 +1954,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
if (mvm->n_nd_channels < n_channels)
continue;
- for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
- if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
- match->channels[match->n_channels++] =
- mvm->nd_channels[j]->center_freq;
+ iwl_mvm_query_set_freqs(mvm, &query, match, i);
}
out_report_nd:
@@ -2030,7 +2137,6 @@ out:
* 2. We are using a unified image but had an error while exiting D3
*/
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
- set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
/*
* When switching images we return 1, which causes mac80211
* to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
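The new iwl_mvm_query_num_match_chans()/iwl_mvm_query_set_freqs() helpers above walk a per-profile matching_channels byte bitmap, counting set bits and mapping each set bit to a scanned channel's frequency. A standalone sketch of that bitmap walk with a hypothetical bitmap_to_freqs() helper, independent of the v1/v2 struct split:

#include <stdint.h>
#include <stddef.h>

static size_t bitmap_to_freqs(const uint8_t *bitmap, size_t bitmap_len,
			      const int *chan_freqs, size_t n_chans,
			      int *out, size_t out_len)
{
	size_t i, n = 0;

	for (i = 0; i < bitmap_len * 8 && i < n_chans; i++)
		if (bitmap[i / 8] & (1u << (i % 8)))
			if (n < out_len)
				out[n++] = chan_freqs[i];

	return n;	/* number of matched frequencies written to out */
}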
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 6925527d8457..f043eefabb4e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -743,9 +743,8 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, vif, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, vif, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
@@ -811,12 +810,6 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
mvm->debugfs_dir, buf);
- if (!mvmvif->dbgfs_slink)
- IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
- dbgfs_dir);
- return;
-err:
- IWL_ERR(mvm, "Can't create debugfs entity\n");
}
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 776b24f54200..d4ff6b44de2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1349,7 +1349,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
return 0;
iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
- (count - 1));
+ (count - 1), NULL);
iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
@@ -1696,9 +1696,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
- if (!debugfs_create_file(alias, mode, parent, mvm, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(alias, mode, parent, mvm, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
@@ -1709,9 +1708,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do { \
- if (!debugfs_create_file(alias, mode, parent, sta, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(alias, mode, parent, sta, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
@@ -2092,13 +2090,9 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
if (iwl_mvm_has_tlc_offload(mvm))
MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
-
- return;
-err:
- IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
}
-int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
struct dentry *bcast_dir __maybe_unused;
char buf[100];
@@ -2142,14 +2136,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
#endif
MVM_DEBUGFS_ADD_FILE(he_sniffer_params, mvm->debugfs_dir, 0600);
- if (!debugfs_create_bool("enable_scan_iteration_notif",
- 0600,
- mvm->debugfs_dir,
- &mvm->scan_iter_notif_enabled))
- goto err;
- if (!debugfs_create_bool("drop_bcn_ap_mode", 0600,
- mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
- goto err;
+ debugfs_create_bool("enable_scan_iteration_notif", 0600,
+ mvm->debugfs_dir, &mvm->scan_iter_notif_enabled);
+ debugfs_create_bool("drop_bcn_ap_mode", 0600, mvm->debugfs_dir,
+ &mvm->drop_bcn_ap_mode);
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
@@ -2157,13 +2147,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
bcast_dir = debugfs_create_dir("bcast_filtering",
mvm->debugfs_dir);
- if (!bcast_dir)
- goto err;
- if (!debugfs_create_bool("override", 0600,
- bcast_dir,
- &mvm->dbgfs_bcast_filtering.override))
- goto err;
+ debugfs_create_bool("override", 0600, bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override);
MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
bcast_dir, 0600);
@@ -2175,35 +2161,26 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
- if (!debugfs_create_bool("d3_wake_sysassert", 0600,
- mvm->debugfs_dir, &mvm->d3_wake_sysassert))
- goto err;
- if (!debugfs_create_u32("last_netdetect_scans", 0400,
- mvm->debugfs_dir, &mvm->last_netdetect_scans))
- goto err;
+ debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
+ &mvm->d3_wake_sysassert);
+ debugfs_create_u32("last_netdetect_scans", 0400, mvm->debugfs_dir,
+ &mvm->last_netdetect_scans);
#endif
- if (!debugfs_create_u8("ps_disabled", 0400,
- mvm->debugfs_dir, &mvm->ps_disabled))
- goto err;
- if (!debugfs_create_blob("nvm_hw", 0400,
- mvm->debugfs_dir, &mvm->nvm_hw_blob))
- goto err;
- if (!debugfs_create_blob("nvm_sw", 0400,
- mvm->debugfs_dir, &mvm->nvm_sw_blob))
- goto err;
- if (!debugfs_create_blob("nvm_calib", 0400,
- mvm->debugfs_dir, &mvm->nvm_calib_blob))
- goto err;
- if (!debugfs_create_blob("nvm_prod", 0400,
- mvm->debugfs_dir, &mvm->nvm_prod_blob))
- goto err;
- if (!debugfs_create_blob("nvm_phy_sku", 0400,
- mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
- goto err;
- if (!debugfs_create_blob("nvm_reg", S_IRUSR,
- mvm->debugfs_dir, &mvm->nvm_reg_blob))
- goto err;
+ debugfs_create_u8("ps_disabled", 0400, mvm->debugfs_dir,
+ &mvm->ps_disabled);
+ debugfs_create_blob("nvm_hw", 0400, mvm->debugfs_dir,
+ &mvm->nvm_hw_blob);
+ debugfs_create_blob("nvm_sw", 0400, mvm->debugfs_dir,
+ &mvm->nvm_sw_blob);
+ debugfs_create_blob("nvm_calib", 0400, mvm->debugfs_dir,
+ &mvm->nvm_calib_blob);
+ debugfs_create_blob("nvm_prod", 0400, mvm->debugfs_dir,
+ &mvm->nvm_prod_blob);
+ debugfs_create_blob("nvm_phy_sku", 0400, mvm->debugfs_dir,
+ &mvm->nvm_phy_sku_blob);
+ debugfs_create_blob("nvm_reg", S_IRUSR,
+ mvm->debugfs_dir, &mvm->nvm_reg_blob);
debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
@@ -2212,11 +2189,5 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
* exists (before the opmode exists which removes the target.)
*/
snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
- if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
- goto err;
-
- return 0;
-err:
- IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
- return -ENOMEM;
+ debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 94132cfd1f56..fec38a47696e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -187,12 +187,24 @@ static void iwl_mvm_ftm_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < ETH_ALEN; i++)
cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
- if (vif->bss_conf.assoc)
+ if (vif->bss_conf.assoc) {
memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
- else
+
+ /* AP's TSF is only relevant if associated */
+ for (i = 0; i < req->n_peers; i++) {
+ if (req->peers[i].report_ap_tsf) {
+ struct iwl_mvm_vif *mvmvif =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
+ return;
+ }
+ }
+ } else {
eth_broadcast_addr(cmd->range_req_bssid);
+ }
- /* TODO: fill in tsf_mac_id if needed */
+ /* Don't report AP's TSF */
cmd->tsf_mac_id = cpu_to_le32(0xff);
}
@@ -480,6 +492,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
@@ -519,8 +532,15 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
int peer_idx;
if (new_api) {
- fw_ap = &fw_resp->ap[i];
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ fw_ap = &fw_resp->ap[i];
+ else
+ fw_ap = (void *)&fw_resp_v6->ap[i];
+
result.final = fw_resp->ap[i].last_burst;
+ result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
+ result.ap_tsf_valid = 1;
} else {
/* the first part is the same for old and new APIs */
fw_ap = (void *)&fw_resp_v5->ap[i];
@@ -588,6 +608,11 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
mvm->ftm_initiator.req,
&result, GFP_KERNEL);
+ if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
+ IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
+ fw_ap->rttConfidence);
+
iwl_mvm_debug_range_resp(mvm, i, &result);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 6a70dece447d..53c217af13c8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -262,9 +262,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
.preferred_tsf = NUM_TSF_IDS,
.found_vif = false,
};
- u32 ac;
- int ret, i, queue_limit;
- unsigned long used_hw_queues;
+ int ret, i;
lockdep_assert_held(&mvm->mutex);
@@ -341,37 +339,9 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
mvmvif->time_event_data.id = TE_MAX;
- /* No need to allocate data queues to P2P Device MAC.*/
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
- vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
-
+ /* No need to allocate data queues to P2P Device MAC and NAN.*/
+ if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
return 0;
- }
-
- /*
- * queues in mac80211 almost entirely independent of
- * the ones here - no real limit
- */
- queue_limit = IEEE80211_MAX_QUEUES;
-
- /*
- * Find available queues, and allocate them to the ACs. When in
- * DQA-mode they aren't really used, and this is done only so the
- * mac80211 ieee80211_check_queues() function won't fail
- */
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- u8 queue = find_first_zero_bit(&used_hw_queues, queue_limit);
-
- if (queue >= queue_limit) {
- IWL_ERR(mvm, "Failed to allocate queue\n");
- ret = -EIO;
- goto exit_fail;
- }
-
- __set_bit(queue, &used_hw_queues);
- vif->hw_queue[ac] = queue;
- }
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP ||
@@ -1143,9 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
ieee80211_tu_to_usec(data.beacon_int * rand /
100);
} else {
- mvmvif->ap_beacon_time =
- iwl_read_prph(mvm->trans,
- DEVICE_SYSTEM_TIME_REG);
+ mvmvif->ap_beacon_time = iwl_mvm_get_systime(mvm);
}
}
@@ -1573,6 +1541,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
rcu_read_lock();
vif = rcu_dereference(mvm->vif_id_to_mac[mac_id]);
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
switch (vif->type) {
case NL80211_IFTYPE_AP:
@@ -1581,7 +1550,6 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
csa_vif != vif))
goto out_unlock;
- mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
csa_id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
if (WARN(csa_id != id_n_color,
"channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
@@ -1602,6 +1570,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
return;
case NL80211_IFTYPE_STATION:
iwl_mvm_csa_client_absent(mvm, vif);
+ cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
break;
default:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 6a3b11dd2edf..5c52469288be 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -420,6 +420,7 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
const static u8 he_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
@@ -597,6 +598,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ hw->wiphy->features |= NL80211_FEATURE_HT_IBSS;
+
hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
if (iwl_mvm_is_lar_supported(mvm))
hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
@@ -732,6 +736,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
hw->wiphy->num_iftype_ext_capab =
ARRAY_SIZE(he_iftypes_ext_capa);
+
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
}
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
@@ -1191,15 +1198,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
- /* clear the D3 reconfig, we only need it to avoid dumping a
- * firmware coredump on reconfiguration, we shouldn't do that
- * on D3->D0 transition
- */
- if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
- mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
- iwl_fw_error_dump(&mvm->fwrt);
- }
-
/* cleanup all stale references (scan, roc), but keep the
* ucode_down ref until reconfig is complete
*/
@@ -1500,6 +1498,91 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
+static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (mvmvif->csa_failed) {
+ mvmvif->csa_failed = false;
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct iwl_mvm_sta *mvmsta;
+
+ mvmvif->csa_bcn_pending = false;
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
+ mvmvif->ap_sta_id);
+
+ if (WARN_ON(!mvmsta)) {
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+ if (ret)
+ goto out_unlock;
+
+ iwl_mvm_stop_session_protection(mvm, vif);
+ }
+
+ mvmvif->ps_disabled = false;
+
+ ret = iwl_mvm_power_update_ps(mvm);
+
+out_unlock:
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
+static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ };
+
+ IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
+
+ mutex_lock(&mvm->mutex);
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd));
+ mutex_unlock(&mvm->mutex);
+
+ WARN_ON(iwl_mvm_post_channel_switch(hw, vif));
+}
+
+static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_vif *mvmvif;
+ struct ieee80211_vif *vif;
+
+ mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work);
+ vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+ mvm = mvmvif->mvm;
+
+ iwl_mvm_abort_channel_switch(mvm->hw, vif);
+ ieee80211_chswitch_done(vif, false);
+}
+
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1626,6 +1709,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
}
iwl_mvm_tcm_add_vif(mvm, vif);
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR)
mvm->monitor_on = true;
@@ -2127,6 +2212,10 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
+ int size = fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_MBSSID_HE) ?
+ sizeof(sta_ctxt_cmd) :
+ sizeof(struct iwl_he_sta_context_cmd_v1);
struct ieee80211_sta *sta;
u32 flags;
int i;
@@ -2254,16 +2343,18 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
/* Set the PPE thresholds accordingly */
if (low_th >= 0 && high_th >= 0) {
- u8 ***pkt_ext_qam =
- (void *)sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th;
+ struct iwl_he_pkt_ext *pkt_ext =
+ (struct iwl_he_pkt_ext *)&sta_ctxt_cmd.pkt_ext;
for (i = 0; i < MAX_HE_SUPP_NSS; i++) {
u8 bw;
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX;
bw++) {
- pkt_ext_qam[i][bw][0] = low_th;
- pkt_ext_qam[i][bw][1] = high_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][0] =
+ low_th;
+ pkt_ext->pkt_ext_qam_th[i][bw][1] =
+ high_th;
}
}
@@ -2308,13 +2399,23 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
}
- /* TODO: support Multi BSSID IE */
+ if (vif->bss_conf.nontransmitted) {
+ flags |= STA_CTXT_HE_REF_BSSID_VALID;
+ ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr,
+ vif->bss_conf.transmitter_bssid);
+ sta_ctxt_cmd.max_bssid_indicator =
+ vif->bss_conf.bssid_indicator;
+ sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index;
+ sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap;
+ sta_ctxt_cmd.profile_periodicity =
+ vif->bss_conf.profile_periodicity;
+ }
sta_ctxt_cmd.flags = cpu_to_le32(flags);
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
DATA_PATH_GROUP, 0),
- 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+ 0, size, &sta_ctxt_cmd))
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
@@ -3612,7 +3713,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int duration)
{
- int res, time_reg = DEVICE_SYSTEM_TIME_REG;
+ int res;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
static const u16 time_event_response[] = { HOT_SPOT_CMD };
@@ -3638,7 +3739,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
0);
/* Set the time and duration */
- tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg));
+ tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm));
delay = AUX_ROC_MIN_DELAY;
req_dur = MSEC_TO_TU(duration);
@@ -4364,8 +4465,8 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
int err;
u32 noa_duration;
- err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len,
+ iwl_mvm_tm_policy, NULL);
if (err)
return err;
@@ -4442,16 +4543,22 @@ static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm,
.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
.tsf = cpu_to_le32(chsw->timestamp),
.cs_count = chsw->count,
+ .cs_mode = chsw->block_tx,
};
lockdep_assert_held(&mvm->mutex);
+ if (chsw->delay)
+ cmd.cs_delayed_bcn_count =
+ DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int);
+
return iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
0, sizeof(cmd), &cmd);
}
+#define IWL_MAX_CSA_BLOCK_TX 1500
static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
@@ -4516,8 +4623,18 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
((vif->bss_conf.beacon_int * (chsw->count - 1) -
IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
- if (chsw->block_tx)
+ if (chsw->block_tx) {
iwl_mvm_csa_client_absent(mvm, vif);
+ /*
+ * If the switch count is undetermined or the TX-block period
+ * is too long, schedule work to monitor the status so we can
+ * gracefully disconnect.
+ */
+ if (!chsw->count ||
+ chsw->count * vif->bss_conf.beacon_int >
+ IWL_MAX_CSA_BLOCK_TX)
+ schedule_delayed_work(&mvmvif->csa_work,
+ msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX));
+ }
if (mvmvif->bf_data.bf_enabled) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
@@ -4532,6 +4649,9 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
iwl_mvm_schedule_csa_period(mvm, vif,
vif->bss_conf.beacon_int,
apply_time);
+
+ mvmvif->csa_count = chsw->count;
+ mvmvif->csa_misbehave = false;
break;
default:
break;
@@ -4552,52 +4672,42 @@ out_unlock:
return ret;
}
-static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
-
- if (mvmvif->csa_failed) {
- mvmvif->csa_failed = false;
- ret = -EIO;
- goto out_unlock;
- }
-
- if (vif->type == NL80211_IFTYPE_STATION) {
- struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_chan_switch_te_cmd cmd = {
+ .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color)),
+ .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY),
+ .tsf = cpu_to_le32(chsw->timestamp),
+ .cs_count = chsw->count,
+ .cs_mode = chsw->block_tx,
+ };
- mvmvif->csa_bcn_pending = false;
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
- mvmvif->ap_sta_id);
+ if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
+ return;
- if (WARN_ON(!mvmsta)) {
- ret = -EIO;
- goto out_unlock;
+ if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
+ if (mvmvif->csa_misbehave) {
+ /* Second time, give up on this AP */
+ iwl_mvm_abort_channel_switch(hw, vif);
+ ieee80211_chswitch_done(vif, false);
+ mvmvif->csa_misbehave = false;
+ return;
}
-
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
-
- iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
-
- ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
- if (ret)
- goto out_unlock;
-
- iwl_mvm_stop_session_protection(mvm, vif);
+ mvmvif->csa_misbehave = true;
}
+ mvmvif->csa_count = chsw->count;
- mvmvif->ps_disabled = false;
-
- ret = iwl_mvm_power_update_ps(mvm);
-
-out_unlock:
- mutex_unlock(&mvm->mutex);
+ IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d\n", mvmvif->id);
- return ret;
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ CMD_ASYNC, sizeof(cmd), &cmd));
}
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
@@ -5056,6 +5166,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.channel_switch = iwl_mvm_channel_switch,
.pre_channel_switch = iwl_mvm_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
+ .abort_channel_switch = iwl_mvm_abort_channel_switch,
+ .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index a50dc53df086..8dc2a9850bc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -490,6 +490,9 @@ struct iwl_mvm_vif {
bool csa_countdown;
bool csa_failed;
u16 csa_target_freq;
+ u16 csa_count;
+ u16 csa_misbehave;
+ struct delayed_work csa_work;
/* Indicates that we are waiting for a beacon on a new channel */
bool csa_bcn_pending;
@@ -1199,7 +1202,6 @@ struct iwl_mvm {
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
- * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
* @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
* @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
*/
@@ -1211,7 +1213,6 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
- IWL_MVM_STATUS_D3_RECONFIG,
IWL_MVM_STATUS_FIRMWARE_RUNNING,
IWL_MVM_STATUS_NEED_FLUSH_P2P,
};
@@ -1537,6 +1538,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1649,8 +1651,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
-void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
- struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
@@ -1784,14 +1786,13 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
/* MVM debugfs */
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
#else
-static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
- struct dentry *dbgfs_dir)
+static inline void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+ struct dentry *dbgfs_dir)
{
- return 0;
}
static inline void
iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2023,17 +2024,6 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
- /* If IWL_MVM_STATUS_HW_RESTART_REQUESTED bit is set then we received
- * an assert. Since we failed to bring the interface up, mac80211
- * will not attempt to reconfig the device,
- * which handles the dump collection in assert flow,
- * so trigger dump collection here.
- */
- if (test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status))
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
-
iwl_fw_cancel_timestamp(&mvm->fwrt);
clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
iwl_fwrt_stop_device(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 13681b03c10e..acd2fda12466 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -862,9 +862,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
min_backoff = iwl_mvm_min_backoff(mvm);
iwl_mvm_thermal_initialize(mvm, min_backoff);
- err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
- if (err)
- goto out_unregister;
+ iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
if (!iwl_mvm_has_new_rx_stats_api(mvm))
memset(&mvm->rx_stats_v3, 0,
@@ -881,14 +879,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
return op_mode;
- out_unregister:
- if (iwlmvm_mod_params.init_dbg)
- return op_mode;
-
- ieee80211_unregister_hw(mvm->hw);
- mvm->hw_registered = false;
- iwl_mvm_leds_exit(mvm);
- iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_flush_dump(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
@@ -1105,7 +1095,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
- iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0);
+ iwl_mvm_rx_monitor_no_data(mvm, napi, rxb, 0);
else
iwl_mvm_rx_common(mvm, rxb, pkt);
}
@@ -1271,6 +1261,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk)
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
{
iwl_abort_notification_waits(&mvm->notif_wait);
+ del_timer(&mvm->fwrt.dump.periodic_trig);
/*
* This is a bit racy, but worst case we tell mac80211 about
@@ -1291,8 +1282,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
* can't recover this since we're already half suspended.
*/
if (!mvm->fw_restart && fw_error) {
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ iwl_fw_error_collect(&mvm->fwrt);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@@ -1340,6 +1330,8 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
}
}
+ iwl_fw_error_collect(&mvm->fwrt);
+
if (fw_error && mvm->fw_restart > 0)
mvm->fw_restart--;
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index a28283ff7295..659e21b2d4e7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -116,8 +116,9 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
return supp;
}
-static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
+static u16 rs_fw_get_config_flags(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct ieee80211_supported_band *sband)
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
@@ -147,6 +148,12 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
(vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+ /* consider our LDPC support in case of HE */
+ if (sband->iftype_data && sband->iftype_data->he_cap.has_he &&
+ !(sband->iftype_data->he_cap.he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ flags &= ~IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
@@ -223,19 +230,43 @@ static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
- const struct ieee80211_sta_he_cap *he_cap,
+ struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
{
- u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
- u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u16 mcs_160 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ u16 mcs_80 = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ u16 tx_mcs_80 =
+ le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_80);
+ u16 tx_mcs_160 =
+ le16_to_cpu(sband->iftype_data->he_cap.he_mcs_nss_supp.tx_mcs_160);
int i;
for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
-
+ u16 _tx_mcs_160 = (tx_mcs_160 >> (2 * i)) & 0x3;
+ u16 _tx_mcs_80 = (tx_mcs_80 >> (2 * i)) & 0x3;
+
+ /* If one side doesn't support it, mark both as not supporting */
+ if (_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_80 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_80 > _tx_mcs_80)
+ _mcs_80 = _tx_mcs_80;
cmd->ht_rates[i][0] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+
+ /* If one side doesn't support it, mark both as not supporting */
+ if (_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ _tx_mcs_160 == IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ _mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ _tx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ }
+ if (_mcs_160 > _tx_mcs_160)
+ _mcs_160 = _tx_mcs_160;
cmd->ht_rates[i][1] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
@@ -264,7 +295,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
/* HT/VHT rates */
if (he_cap && he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
- rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+ rs_fw_he_set_enabled_rates(sta, sband, cmd);
} else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
@@ -345,6 +376,37 @@ out:
rcu_read_unlock();
}
+static u16 rs_fw_get_max_amsdu_len(struct ieee80211_sta *sta)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+
+ if (vht_cap && vht_cap->vht_supported) {
+ switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+ return IEEE80211_MAX_MPDU_LEN_VHT_11454;
+ case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+ return IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ default:
+ return IEEE80211_MAX_MPDU_LEN_VHT_3895;
+ }
+
+ } else if (ht_cap && ht_cap->ht_supported) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_MAX_AMSDU)
+ /*
+ * A-MPDU aggregation is offloaded, so assume it is enabled
+ * and the max MPDU size in an A-MPDU is 4095 bytes
+ * (spec 802.11-2016 9.3.2.1)
+ */
+ return IEEE80211_MAX_MPDU_LEN_HT_BA;
+ else
+ return IEEE80211_MAX_MPDU_LEN_HT_3839;
+ }
+
+ /* in legacy mode no A-MSDU is enabled, so return zero */
+ return 0;
+}
+
void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update)
{
@@ -352,15 +414,16 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
- struct ieee80211_supported_band *sband;
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ u16 max_amsdu_len = rs_fw_get_max_amsdu_len(sta);
struct iwl_tlc_config_cmd cfg_cmd = {
.sta_id = mvmsta->sta_id,
.max_ch_width = update ?
rs_fw_bw_from_sta_bw(sta) : RATE_MCS_CHAN_WIDTH_20,
- .flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+ .flags = cpu_to_le16(rs_fw_get_config_flags(mvm, sta, sband)),
.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
- .max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+ .max_mpdu_len = cpu_to_le16(max_amsdu_len),
.amsdu = iwl_mvm_is_csum_supported(mvm),
};
int ret;
@@ -370,9 +433,14 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
- sband = hw->wiphy->bands[band];
rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+ /*
+ * Since TLC offload works with a single mode, we can assume
+ * only VHT/HT is used and also set this as the station's
+ * max A-MSDU length.
+ */
+ sta->max_amsdu_len = max_amsdu_len;
+
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index e231a44d2423..c182821ab22b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4078,9 +4078,8 @@ static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, lq_sta, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, lq_sta, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
@@ -4108,9 +4107,6 @@ static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
&lq_sta->pers.dbg_fixed_txp_reduction);
MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
- return;
-err:
- IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
}
void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index b516fd1867ec..1824566d08fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1699,8 +1699,8 @@ out:
rcu_read_unlock();
}
-void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
- struct iwl_rx_cmd_buffer *rxb, int queue)
+void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
@@ -1721,10 +1721,6 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
- /* Currently only NDP type is supported */
- if (info_type != RX_NO_DATA_INFO_TYPE_NDP)
- return;
-
energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;
@@ -1746,9 +1742,22 @@ void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi,
/* 0-length PSDU */
rx_status->flag |= RX_FLAG_NO_PSDU;
- /* currently this is the only type for which we get this notif */
- rx_status->zero_length_psdu_type =
- IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+
+ switch (info_type) {
+ case RX_NO_DATA_INFO_TYPE_NDP:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
+ break;
+ case RX_NO_DATA_INFO_TYPE_MU_UNMATCHED:
+ case RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_NOT_CAPTURED;
+ break;
+ default:
+ rx_status->zero_length_psdu_type =
+ IEEE80211_RADIOTAP_ZERO_LEN_PSDU_VENDOR;
+ break;
+ }
/* This may be overridden by iwl_mvm_rx_he() to HE_RU */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 78694bc38e76..d9ddf9ff6428 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1082,21 +1082,23 @@ static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
dwell->extended = IWL_SCAN_DWELL_EXTENDED;
}
-static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
+ u32 max_channels)
{
struct ieee80211_supported_band *band;
int i, j = 0;
band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
- for (i = 0; i < band->n_channels; i++, j++)
+ for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
- for (i = 0; i < band->n_channels; i++, j++)
+ for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
channels[j] = band->channels[i].hw_value;
}
static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
{
enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
struct iwl_scan_config_v1 *cfg = config;
@@ -1115,11 +1117,12 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
- iwl_mvm_fill_channels(mvm, cfg->channel_array);
+ iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
- u32 flags, u8 channel_flags)
+ u32 flags, u8 channel_flags,
+ u32 max_channels)
{
struct iwl_scan_config *cfg = config;
@@ -1162,7 +1165,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
- iwl_mvm_fill_channels(mvm, cfg->channel_array);
+ iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
}
int iwl_mvm_config_scan(struct iwl_mvm *mvm)
@@ -1181,7 +1184,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
u8 channel_flags;
if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
- return -ENOBUFS;
+ num_channels = mvm->fw->ucode_capa.n_scan_channels;
if (iwl_mvm_is_cdb_supported(mvm)) {
type = iwl_mvm_get_scan_type_band(mvm, NULL,
@@ -1234,9 +1237,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
- iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+ iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
+ num_channels);
} else {
- iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
+ iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
+ num_channels);
}
cmd.data[0] = cfg;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 98d123dd7177..f545a737a92d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -746,7 +746,8 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
u8 sta_id, u8 tid, unsigned int timeout)
{
- int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+ int queue, size = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ mvm->trans->cfg->min_256_ba_txq_size);
if (tid == IWL_MAX_TID_COUNT) {
tid = IWL_MGMT_TID;
@@ -2109,12 +2110,14 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (vif->type == NL80211_IFTYPE_AP ||
- vif->type == NL80211_IFTYPE_ADHOC)
+ vif->type == NL80211_IFTYPE_ADHOC) {
queue = mvm->probe_queue;
- else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
queue = mvm->p2p_dev_queue;
- else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
+ } else {
+ WARN(1, "Missing required TXQ for adding bcast STA\n");
return -EINVAL;
+ }
bsta->tfd_queue_msk |= BIT(queue);
@@ -2277,7 +2280,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
const u8 *maddr = _maddr;
struct iwl_trans_txq_scd_cfg cfg = {
- .fifo = IWL_MVM_TX_FIFO_MCAST,
+ .fifo = vif->type == NL80211_IFTYPE_AP ?
+ IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
.sta_id = msta->sta_id,
.tid = 0,
.aggregate = false,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 859aa5a4e6b5..9df21a8d1fc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 Intel Corporation
+ * Copyright(C) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 Intel Corporation
+ * Copyright(C) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -252,8 +252,7 @@ static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
/* we only send requests to our switching peer - update sent time */
if (state == IWL_MVM_TDLS_SW_REQ_SENT)
- mvm->tdls_cs.peer.sent_timestamp =
- iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
if (state == IWL_MVM_TDLS_SW_IDLE)
mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 9693fa4cdc39..4d34e5ab1bff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,6 +234,7 @@ iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
break;
}
iwl_mvm_csa_client_absent(mvm, te_data->vif);
+ cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(te_data->vif, true);
break;
default:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 4649327abb45..b9914efc55c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1418,6 +1418,16 @@ void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
}
+u32 iwl_mvm_get_systime(struct iwl_mvm *mvm)
+{
+ u32 reg_addr = DEVICE_SYSTEM_TIME_REG;
+
+ if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000 &&
+ mvm->trans->cfg->gp2_reg_addr)
+ reg_addr = mvm->trans->cfg->gp2_reg_addr;
+
+ return iwl_read_prph(mvm->trans, reg_addr);
+}
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
{
@@ -1432,7 +1442,7 @@ void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
iwl_mvm_power_update_device(mvm);
}
- *gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+ *gp2 = iwl_mvm_get_systime(mvm);
*boottime = ktime_get_boot_ns();
if (!ps_disabled) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 1e36459948db..f496d1bcb643 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -66,7 +66,8 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
void *iml_img;
u32 control_flags = 0;
int ret;
- int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
+ int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->cfg->min_txq_size);
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 9274e317cc77..8969b47bacf2 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -210,7 +210,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
- TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+ TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 9f1af8da9dc1..cd035061cdd5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -928,11 +928,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
- {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl_ax101_cfg_qu_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl_ax101_cfg_qu_hr)},
@@ -963,9 +958,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
- {IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
- {IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
-
{IWL_PCI_DEVICE(0x2725, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax210_2ax_cfg_so_hr_a0)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax210_2ax_cfg_so_hr_a0)},
@@ -1047,9 +1039,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* register transport layer debugfs here */
- ret = iwl_trans_pcie_dbgfs_register(iwl_trans);
- if (ret)
- goto out_free_drv;
+ iwl_trans_pcie_dbgfs_register(iwl_trans);
/* if RTPM is in use, enable it in our device */
if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
@@ -1078,8 +1068,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_free_drv:
- iwl_drv_stop(iwl_trans->drv);
out_free_trans:
iwl_trans_pcie_free(iwl_trans);
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 59213164f35e..b513037dc066 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -106,7 +106,6 @@ struct iwl_host_cmd;
* @page: driver's pointer to the rxb page
* @invalid: rxb is in driver ownership - not owned by HW
* @vid: index of this rxb in the global table
- * @size: size used from the buffer
*/
struct iwl_rx_mem_buffer {
dma_addr_t page_dma;
@@ -114,7 +113,6 @@ struct iwl_rx_mem_buffer {
u16 vid;
bool invalid;
struct list_head list;
- u32 size;
};
/**
@@ -135,46 +133,32 @@ struct isr_statistics {
u32 unhandled;
};
-#define IWL_RX_TD_TYPE_MSK 0xff000000
-#define IWL_RX_TD_SIZE_MSK 0x00ffffff
-#define IWL_RX_TD_SIZE_2K BIT(11)
-#define IWL_RX_TD_TYPE 0
-
/**
* struct iwl_rx_transfer_desc - transfer descriptor
- * @type_n_size: buffer type (bit 0: external buff valid,
- * bit 1: optional footer valid, bit 2-7: reserved)
- * and buffer size
* @addr: ptr to free buffer start address
* @rbid: unique tag of the buffer
* @reserved: reserved
*/
struct iwl_rx_transfer_desc {
- __le32 type_n_size;
- __le64 addr;
__le16 rbid;
- __le16 reserved;
+ __le16 reserved[3];
+ __le64 addr;
} __packed;
-#define IWL_RX_CD_SIZE 0xffffff00
+#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
/**
* struct iwl_rx_completion_desc - completion descriptor
- * @type: buffer type (bit 0: external buff valid,
- * bit 1: optional footer valid, bit 2-7: reserved)
- * @status: status of the completion
* @reserved1: reserved
* @rbid: unique tag of the received buffer
- * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @flags: flags (0: fragmented, all others: reserved)
* @reserved2: reserved
*/
struct iwl_rx_completion_desc {
- u8 type;
- u8 status;
- __le16 reserved1;
+ __le32 reserved1;
__le16 rbid;
- __le32 size;
- u8 reserved2[22];
+ u8 flags;
+ u8 reserved2[25];
} __packed;
/**
@@ -306,10 +290,6 @@ struct iwl_cmd_meta {
u32 tbs;
};
-
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
/*
* The FH will write back to the first TB only, so we need to copy some data
* into the buffer regardless of whether it should be mapped or not.
@@ -556,7 +536,7 @@ struct iwl_trans_pcie {
int ict_index;
bool use_ict;
bool is_down, opmode_down;
- bool debug_rfkill;
+ s8 debug_rfkill;
struct isr_statistics isr_stats;
spinlock_t irq_lock;
@@ -1002,7 +982,7 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
lockdep_assert_held(&trans_pcie->mutex);
- if (trans_pcie->debug_rfkill)
+ if (trans_pcie->debug_rfkill == 1)
return true;
return !(iwl_read32(trans, CSR_GP_CNTRL) &
@@ -1046,12 +1026,9 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
-static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
-{
- return 0;
-}
+static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8d4f0628622b..31b3591f71d1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -282,9 +282,8 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
struct iwl_rx_transfer_desc *bd = rxq->bd;
- bd[rxq->write].type_n_size =
- cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
- ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+ BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
+
bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
} else {
@@ -435,7 +434,7 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
/*
* Issue an error if we don't have enough pre-allocated
* buffers.
-` */
+ */
if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
IWL_CRIT(trans,
"Failed to alloc_pages\n");
@@ -1265,9 +1264,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
.truesize = max_len,
};
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- rxcb.status = rxq->cd[i].status;
-
pkt = rxb_addr(&rxcb);
if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
@@ -1394,6 +1390,8 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
struct iwl_rx_mem_buffer *rxb;
u16 vid;
+ BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
+
if (!trans->cfg->mq_rx_supported) {
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
@@ -1415,9 +1413,6 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
- rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
-
rxb->invalid = true;
return rxb;
@@ -1434,10 +1429,15 @@ out_err:
static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
+ struct iwl_rxq *rxq;
u32 r, i, count = 0;
bool emergency = false;
+ if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
+ return;
+
+ rxq = &trans_pcie->rxq[queue];
+
restart:
spin_lock(&rxq->lock);
/* uCode's read index (stored in shared DRAM) indicates the last Rx
@@ -2212,6 +2212,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
"Hardware error detected. Restarting.\n");
isr_stats->hw++;
+ trans->hw_error = true;
iwl_pcie_irq_handle_error(trans);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 9c203ca75de9..8507a7bdcfdd 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -234,7 +234,8 @@ void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
+ int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->cfg->min_txq_size);
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock(&trans_pcie->irq_lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 4f5eec7e44bd..803fcbac4152 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -896,6 +896,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans)
if (!trans->num_blocks)
return;
+ IWL_DEBUG_FW(trans,
+ "WRT: applying DRAM buffer[0] destination\n");
iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
trans->fw_mon[0].physical >>
MON_BUFF_SHIFT_VER2);
@@ -2441,9 +2443,8 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, trans, \
- &iwl_dbgfs_##name##_ops)) \
- goto err; \
+ debugfs_create_file(#name, mode, parent, trans, \
+ &iwl_dbgfs_##name##_ops); \
} while (0)
/* file operation */
@@ -2686,16 +2687,17 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool old = trans_pcie->debug_rfkill;
+ bool new_value;
int ret;
- ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
+ ret = kstrtobool_from_user(user_buf, count, &new_value);
if (ret)
return ret;
- if (old == trans_pcie->debug_rfkill)
+ if (new_value == trans_pcie->debug_rfkill)
return count;
IWL_WARN(trans, "changing debug rfkill %d->%d\n",
- old, trans_pcie->debug_rfkill);
+ trans_pcie->debug_rfkill, new_value);
+ trans_pcie->debug_rfkill = new_value;
iwl_pcie_handle_rfkill_irq(trans);
return count;
@@ -2846,7 +2848,7 @@ static const struct file_operations iwl_dbgfs_monitor_data_ops = {
};
/* Create the debugfs files and directories */
-int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
{
struct dentry *dir = trans->dbgfs_dir;
@@ -2857,11 +2859,6 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
- return 0;
-
-err:
- IWL_ERR(trans, "failed to create the trans debugfs entry\n");
- return -ENOMEM;
}
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
@@ -3011,10 +3008,14 @@ static void
iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
struct iwl_fw_error_dump_fw_mon *fw_mon_data)
{
- u32 base, write_ptr, wrap_cnt;
+ u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
- /* If there was a dest TLV - use the values from there */
- if (trans->ini_valid) {
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
+ base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
+ write_ptr = DBGC_CUR_DBGBUF_STATUS;
+ wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
+ } else if (trans->ini_valid) {
base = iwl_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2);
write_ptr = iwl_umac_prph(trans, MON_BUFF_WRPTR_VER2);
wrap_cnt = iwl_umac_prph(trans, MON_BUFF_CYCLE_CNT_VER2);
@@ -3027,12 +3028,18 @@ iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
write_ptr = MON_BUFF_WRPTR;
wrap_cnt = MON_BUFF_CYCLE_CNT;
}
- fw_mon_data->fw_mon_wr_ptr =
- cpu_to_le32(iwl_read_prph(trans, write_ptr));
+
+ write_ptr_val = iwl_read_prph(trans, write_ptr);
fw_mon_data->fw_mon_cycle_cnt =
cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
fw_mon_data->fw_mon_base_ptr =
cpu_to_le32(iwl_read_prph(trans, base));
+ if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ fw_mon_data->fw_mon_base_high_ptr =
+ cpu_to_le32(iwl_read_prph(trans, base_high));
+ write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
+ }
+ fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
}
static u32
@@ -3043,9 +3050,10 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
u32 len = 0;
if ((trans->num_blocks &&
- trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
- (trans->dbg_dest_tlv && !trans->ini_valid) ||
- (trans->ini_valid && trans->num_blocks)) {
+ (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
+ trans->cfg->device_family >= IWL_DEVICE_FAMILY_AX210 ||
+ trans->ini_valid)) ||
+ (trans->dbg_dest_tlv && !trans->ini_valid)) {
struct iwl_fw_error_dump_fw_mon *fw_mon_data;
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
@@ -3164,8 +3172,10 @@ static struct iwl_trans_dump_data
len = sizeof(*dump_data);
/* host commands */
- len += sizeof(*data) +
- cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+ if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD))
+ len += sizeof(*data) +
+ cmdq->n_window * (sizeof(*txcmd) +
+ TFD_MAX_PAYLOAD_SIZE);
/* FW monitor */
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
@@ -3411,7 +3421,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
ret = -ENOMEM;
goto out_no_pci;
}
-
+ trans_pcie->debug_rfkill = -1;
if (!cfg->base_params->pcie_l1_allowed) {
/*
@@ -3539,6 +3549,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF)) {
trans->cfg = &iwlax210_2ax_cfg_so_gf_a0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_GF4)) {
+ trans->cfg = &iwlax210_2ax_cfg_so_gf4_a0;
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 7be73e2c4681..fa4245d0d4a8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -996,10 +996,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
if (cmd_queue)
- slots_num = max_t(u32, TFD_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
- slots_num = TFD_TX_CMD_SLOTS;
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ trans->cfg->min_256_ba_txq_size);
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
@@ -1049,10 +1050,11 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
if (cmd_queue)
- slots_num = max_t(u32, TFD_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
- slots_num = TFD_TX_CMD_SLOTS;
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
+ trans->cfg->min_256_ba_txq_size);
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);
if (ret) {
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 27a49068d32d..57ad56435dda 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -554,7 +554,7 @@ static int p54p_probe(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "Cannot enable new PCI device\n");
- return err;
+ goto err_put;
}
mem_addr = pci_resource_start(pdev, 0);
@@ -639,6 +639,7 @@ static int p54p_probe(struct pci_dev *pdev,
pci_release_regions(pdev);
err_disable_dev:
pci_disable_device(pdev);
+err_put:
pci_dev_put(pdev);
return err;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c71adb1f1f41..60ca13e0f15b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -409,8 +409,8 @@ static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy,
int err;
u32 val;
- err = nla_parse(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len,
- hwsim_vendor_test_policy, NULL);
+ err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data,
+ data_len, hwsim_vendor_test_policy, NULL);
if (err)
return err;
if (!tb[QCA_WLAN_VENDOR_ATTR_TEST])
@@ -1932,8 +1932,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
struct sk_buff *skb;
int err, ps;
- err = nla_parse(tb, HWSIM_TM_ATTR_MAX, data, len,
- hwsim_testmode_policy, NULL);
+ err = nla_parse_deprecated(tb, HWSIM_TM_ATTR_MAX, data, len,
+ hwsim_testmode_policy, NULL);
if (err)
return err;
@@ -2806,6 +2806,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, SUPPORTS_PS);
ieee80211_hw_set(hw, TDLS_WIDER_BW);
+
+ /* We only have SW crypto and only implement the A-MPDU API
+ * (but don't really build A-MPDUs), so we can have extended
+ * key support.
+ */
+ ieee80211_hw_set(hw, EXT_KEY_ID_NATIVE);
if (rctbl)
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
@@ -3627,35 +3633,35 @@ done:
static const struct genl_ops hwsim_ops[] = {
{
.cmd = HWSIM_CMD_REGISTER,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_register_received_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_FRAME,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_cloned_frame_received_nl,
},
{
.cmd = HWSIM_CMD_TX_INFO_FRAME,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_tx_info_frame_received_nl,
},
{
.cmd = HWSIM_CMD_NEW_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_new_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_DEL_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_del_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_GET_RADIO,
- .policy = hwsim_genl_policy,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = hwsim_get_radio_nl,
.dumpit = hwsim_dump_radio_nl,
},
@@ -3665,6 +3671,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.name = "MAC80211_HWSIM",
.version = 1,
.maxattr = HWSIM_ATTR_MAX,
+ .policy = hwsim_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = hwsim_ops,
@@ -3901,6 +3908,8 @@ static int __init init_mac80211_hwsim(void)
param.p2p_device = support_p2p_device;
param.use_chanctx = channels > 1;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
+ if (param.p2p_device)
+ param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
err = mac80211_hwsim_new_radio(NULL, &param);
if (err < 0)
diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
index 524fd565cb2a..572d187a99f4 100644
--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
+++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
@@ -9,13 +9,13 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8997"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
---help---
This adds support for wireless adapters based on Marvell
- 8786/8787/8797/8887/8897/8997 chipsets with SDIO interface.
+ 8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index c46f0a54a0c7..e11a4bb67172 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -4059,8 +4059,8 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!priv)
return -EINVAL;
- err = nla_parse(tb, MWIFIEX_TM_ATTR_MAX, data, len, mwifiex_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, MWIFIEX_TM_ATTR_MAX, data, len,
+ mwifiex_tm_policy, NULL);
if (err)
return err;
@@ -4082,16 +4082,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
+ kfree(hostcmd);
return -EFAULT;
}
/* process hostcmd response*/
skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
- if (!skb)
+ if (!skb) {
+ kfree(hostcmd);
return -ENOMEM;
+ }
err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
hostcmd->len, hostcmd->cmd);
if (err) {
+ kfree(hostcmd);
kfree_skb(skb);
return -EMSGSIZE;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index bfe84e55df77..f1522fb1c1e8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
rx_rate - 1 : rx_rate;
+ if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
+ rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
+
return rate_index;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 60db2b969e20..8c35441fd9b7 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -341,6 +341,12 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
sleep_cfm_tmp =
dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
+ if (!sleep_cfm_tmp) {
+ mwifiex_dbg(adapter, ERROR,
+ "SLEEP_CFM: dev_alloc_skb failed\n");
+ return -ENOMEM;
+ }
+
skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
+ MWIFIEX_TYPE_LEN);
put_unaligned_le32(MWIFIEX_USB_TYPE_CMD, sleep_cfm_tmp->data);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 20cee5c397fb..f6da8edab7f1 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index d5a70340a945..24c041dad9f6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -491,6 +491,8 @@ static void mwifiex_sdio_coredump(struct device *dev)
#define SDIO_DEVICE_ID_MARVELL_8801 (0x9139)
/* Device ID for SD8977 */
#define SDIO_DEVICE_ID_MARVELL_8977 (0x9145)
+/* Device ID for SD8987 */
+#define SDIO_DEVICE_ID_MARVELL_8987 (0x9149)
/* Device ID for SD8997 */
#define SDIO_DEVICE_ID_MARVELL_8997 (0x9141)
@@ -511,6 +513,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8987},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997),
.driver_data = (unsigned long)&mwifiex_sdio_sd8997},
{},
@@ -2731,4 +2735,5 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 912de2cde8d9..f672bdf52cc1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -37,6 +37,7 @@
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sd8977_uapsta.bin"
+#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sd8997_uapsta.bin"
#define BLOCK_MODE 1
@@ -526,6 +527,58 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
0x68, 0x69, 0x6a},
};
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf9,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xE8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
.firmware = SD8786_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd87xx,
@@ -633,6 +686,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ .firmware = SD8987_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8987,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
.firmware = SD8801_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_sd87xx,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index 69e3b624adbb..24b33e20e7a9 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -1025,17 +1025,14 @@ mwifiex_create_custom_regdomain(struct mwifiex_private *priv,
struct ieee80211_regdomain *regd;
struct ieee80211_reg_rule *rule;
bool new_rule;
- int regd_size, idx, freq, prev_freq = 0;
+ int idx, freq, prev_freq = 0;
u32 bw, prev_bw = 0;
u8 chflags, prev_chflags = 0, valid_rules = 0;
if (WARN_ON_ONCE(num_chan > NL80211_MAX_SUPP_REG_RULES))
return ERR_PTR(-EINVAL);
- regd_size = sizeof(struct ieee80211_regdomain) +
- num_chan * sizeof(struct ieee80211_reg_rule);
-
- regd = kzalloc(regd_size, GFP_KERNEL);
+ regd = kzalloc(struct_size(regd, reg_rules, num_chan), GFP_KERNEL);
if (!regd)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index a327fc5b36e3..8b3123cb84c8 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -27,9 +27,9 @@
#define MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE 12
-static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
- struct mwifiex_sta_node *sta_ptr,
- struct sk_buff *event)
+static int mwifiex_check_ibss_peer_capabilities(struct mwifiex_private *priv,
+ struct mwifiex_sta_node *sta_ptr,
+ struct sk_buff *event)
{
int evt_len, ele_len;
u8 *curr;
@@ -42,7 +42,7 @@ static int mwifiex_check_ibss_peer_capabilties(struct mwifiex_private *priv,
evt_len = event->len;
curr = event->data;
- mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilties:",
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "ibss peer capabilities:",
event->data, event->len);
skb_push(event, MWIFIEX_IBSS_CONNECT_EVT_FIX_SIZE);
@@ -937,8 +937,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
ibss_sta_addr);
sta_ptr = mwifiex_add_sta_entry(priv, ibss_sta_addr);
if (sta_ptr && adapter->adhoc_11n_enabled) {
- mwifiex_check_ibss_peer_capabilties(priv, sta_ptr,
- adapter->event_skb);
+ mwifiex_check_ibss_peer_capabilities(priv, sta_ptr,
+ adapter->event_skb);
if (sta_ptr->is_11n_enabled)
for (i = 0; i < MAX_NUM_TID; i++)
sta_ptr->ampdu_sta[i] =
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index fb28a5c7f441..52a2ce2e78b0 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -250,7 +250,8 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_private *priv,
local_rx_pd->nf);
}
} else {
- if (rx_pkt_type != PKT_TYPE_BAR)
+ if (rx_pkt_type != PKT_TYPE_BAR &&
+ local_rx_pd->priority < MAX_NUM_TID)
priv->rx_seq[local_rx_pd->priority] = seq_num;
memcpy(ta, priv->curr_bss_params.bss_descriptor.mac_address,
ETH_ALEN);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index ca759d9c0253..86bfa1b9ef9d 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -23,8 +23,8 @@
#define MWIFIEX_BSS_START_EVT_FIX_SIZE 12
-static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
- struct sk_buff *event)
+static int mwifiex_check_uap_capabilities(struct mwifiex_private *priv,
+ struct sk_buff *event)
{
int evt_len;
u8 *curr;
@@ -38,7 +38,7 @@ static int mwifiex_check_uap_capabilties(struct mwifiex_private *priv,
evt_len = event->len;
curr = event->data;
- mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilties:",
+ mwifiex_dbg_dump(priv->adapter, EVT_D, "uap capabilities:",
event->data, event->len);
skb_push(event, MWIFIEX_BSS_START_EVT_FIX_SIZE);
@@ -201,7 +201,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
ETH_ALEN);
if (priv->hist_data)
mwifiex_hist_data_reset(priv);
- mwifiex_check_uap_capabilties(priv, adapter->event_skb);
+ mwifiex_check_uap_capabilities(priv, adapter->event_skb);
break;
case EVENT_UAP_MIC_COUNTERMEASURES:
/* For future development */
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 8e4e9b6919e0..c4db6417748f 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -441,6 +441,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
#define MWL8K_CMD_UPDATE_STADB 0x1123
#define MWL8K_CMD_BASTREAM 0x1125
+#define MWL8K_LEGACY_5G_RATE_OFFSET \
+ (ARRAY_SIZE(mwl8k_rates_24) - ARRAY_SIZE(mwl8k_rates_50))
+
static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
{
u16 command = le16_to_cpu(cmd);
@@ -1016,8 +1019,9 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
- if (!(status->encoding == RX_ENC_HT))
- status->rate_idx -= 5;
+ if (!(status->encoding == RX_ENC_HT) &&
+ status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
+ status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
} else {
status->band = NL80211_BAND_2GHZ;
}
@@ -1124,8 +1128,9 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
if (rxd->channel > 14) {
status->band = NL80211_BAND_5GHZ;
- if (!(status->encoding == RX_ENC_HT))
- status->rate_idx -= 5;
+ if (!(status->encoding == RX_ENC_HT) &&
+ status->rate_idx >= MWL8K_LEGACY_5G_RATE_OFFSET)
+ status->rate_idx -= MWL8K_LEGACY_5G_RATE_OFFSET;
} else {
status->band = NL80211_BAND_2GHZ;
}
@@ -2234,8 +2239,10 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
dma_size = le16_to_cpu(cmd->length);
dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(priv->pdev, dma_addr))
- return -ENOMEM;
+ if (pci_dma_mapping_error(priv->pdev, dma_addr)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
priv->hostcmd_wait = &cmd_wait;
iowrite32(dma_addr, regs + MWL8K_HIU_GEN_PTR);
@@ -2275,6 +2282,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
ms);
}
+exit:
if (bitmap)
mwl8k_enable_bsses(hw, true, bitmap);
@@ -4631,7 +4639,7 @@ static void mwl8k_tx_poll(unsigned long data)
limit = 32;
- spin_lock_bh(&priv->tx_lock);
+ spin_lock(&priv->tx_lock);
for (i = 0; i < mwl8k_tx_queues(priv); i++)
limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
@@ -4641,7 +4649,7 @@ static void mwl8k_tx_poll(unsigned long data)
priv->tx_wait = NULL;
}
- spin_unlock_bh(&priv->tx_lock);
+ spin_unlock(&priv->tx_lock);
if (limit) {
writel(~MWL8K_A2H_INT_TX_DONE,
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index dbe8c70a8f73..30e44e4c3c7d 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -22,3 +22,4 @@ config MT76x02_USB
source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig"
source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
+source "drivers/net/wireless/mediatek/mt76/mt7615/Kconfig"
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 3fd1b64b4aa7..7beae2354a24 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -16,10 +16,11 @@ CFLAGS_mt76x02_trace.o := -I$(src)
mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \
mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \
mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \
- mt76x02_dfs.o
+ mt76x02_dfs.o mt76x02_beacon.o
mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o
obj-$(CONFIG_MT76x0_COMMON) += mt76x0/
obj-$(CONFIG_MT76x2_COMMON) += mt76x2/
obj-$(CONFIG_MT7603E) += mt7603/
+obj-$(CONFIG_MT7615E) += mt7615/
diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
index 73c8b2805c97..27e3ff039c48 100644
--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
+++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
@@ -135,7 +135,7 @@ mt76_rx_aggr_check_ctl(struct sk_buff *skb, struct sk_buff_head *frames)
return;
status->tid = le16_to_cpu(bar->control) >> 12;
- seqno = le16_to_cpu(bar->start_seq_num) >> 4;
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
tid = rcu_dereference(wcid->aggr[status->tid]);
if (!tid)
return;
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index a5adf22c3ffa..c6a9fe2aef9d 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -43,14 +43,15 @@ mt76_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_queue *q = &dev->q_tx[i];
+ struct mt76_sw_queue *q = &dev->q_tx[i];
- if (!q->ndesc)
+ if (!q->q)
continue;
seq_printf(s,
"%d: queued=%d head=%d tail=%d swq_queued=%d\n",
- i, q->queued, q->head, q->tail, q->swq_queued);
+ i, q->q->queued, q->q->head, q->q->tail,
+ q->swq_queued);
}
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 76629b98c78d..4381155375e1 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -18,16 +18,20 @@
#include "mt76.h"
#include "dma.h"
-#define DMA_DUMMY_TXWI ((void *) ~0)
-
static int
-mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base)
{
int size;
int i;
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
+
+ q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
+ q->ndesc = n_desc;
+ q->buf_size = bufsize;
+ q->hw_idx = idx;
size = q->ndesc * sizeof(struct mt76_desc);
q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
@@ -43,10 +47,10 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q)
for (i = 0; i < q->ndesc; i++)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(0, &q->regs->cpu_idx);
- iowrite32(0, &q->regs->dma_idx);
- iowrite32(q->ndesc, &q->regs->ring_size);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(0, &q->regs->cpu_idx);
+ writel(0, &q->regs->dma_idx);
+ writel(q->ndesc, &q->regs->ring_size);
return 0;
}
@@ -61,7 +65,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
int i, idx = -1;
if (txwi)
- q->entry[q->head].txwi = DMA_DUMMY_TXWI;
+ q->entry[q->head].txwi = DMA_DUMMY_DATA;
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
@@ -120,9 +124,12 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
DMA_TO_DEVICE);
}
- if (e->txwi == DMA_DUMMY_TXWI)
+ if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
+ if (e->skb == DMA_DUMMY_DATA)
+ e->skb = NULL;
+
*prev_e = *e;
memset(e, 0, sizeof(*e));
}
@@ -130,56 +137,64 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->desc_dma, &q->regs->desc_base);
- iowrite32(q->ndesc, &q->regs->ring_size);
- q->head = ioread32(&q->regs->dma_idx);
+ writel(q->desc_dma, &q->regs->desc_base);
+ writel(q->ndesc, &q->regs->ring_size);
+ q->head = readl(&q->regs->dma_idx);
q->tail = q->head;
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *q = sq->q;
struct mt76_queue_entry entry;
+ unsigned int n_swq_queued[4] = {};
+ unsigned int n_queued = 0;
bool wake = false;
- int last;
+ int i, last;
- if (!q->ndesc)
+ if (!q)
return;
- spin_lock_bh(&q->lock);
if (flush)
last = -1;
else
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
- while (q->queued && q->tail != last) {
+ while ((q->queued > n_queued) && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
if (entry.schedule)
- q->swq_queued--;
+ n_swq_queued[entry.qid]++;
q->tail = (q->tail + 1) % q->ndesc;
- q->queued--;
+ n_queued++;
- if (entry.skb) {
- spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, flush);
- spin_lock_bh(&q->lock);
- }
+ if (entry.skb)
+ dev->drv->tx_complete_skb(dev, qid, &entry);
if (entry.txwi) {
- mt76_put_txwi(dev, entry.txwi);
+ if (!(dev->drv->txwi_flags & MT_TXWI_NO_FREE))
+ mt76_put_txwi(dev, entry.txwi);
wake = !flush;
}
if (!flush && q->tail == last)
- last = ioread32(&q->regs->dma_idx);
+ last = readl(&q->regs->dma_idx);
}
- if (!flush)
- mt76_txq_schedule(dev, q);
- else
+ spin_lock_bh(&q->lock);
+
+ q->queued -= n_queued;
+ for (i = 0; i < ARRAY_SIZE(n_swq_queued); i++) {
+ if (!n_swq_queued[i])
+ continue;
+
+ dev->q_tx[i].swq_queued -= n_swq_queued[i];
+ }
+
+ if (flush)
mt76_dma_sync_idx(dev, q);
wake = wake && q->stopped &&
@@ -244,20 +259,20 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- iowrite32(q->head, &q->regs->cpu_idx);
+ writel(q->head, &q->regs->cpu_idx);
}
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = &dev->q_tx[qid];
+ struct mt76_queue *q = dev->q_tx[qid].q;
struct mt76_queue_buf buf;
dma_addr_t addr;
addr = dma_map_single(dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
return -ENOMEM;
buf.addr = addr;
@@ -271,80 +286,85 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
return 0;
}
-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta)
+static int
+mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta)
{
+ struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ int len, n = 0, ret = -ENOMEM;
struct mt76_queue_entry e;
struct mt76_txwi_cache *t;
- struct mt76_queue_buf buf[32];
struct sk_buff *iter;
dma_addr_t addr;
- int len;
- u32 tx_info = 0;
- int n, ret;
+ u8 *txwi;
t = mt76_get_txwi(dev);
if (!t) {
ieee80211_free_txskb(dev->hw, skb);
return -ENOMEM;
}
+ txwi = mt76_get_txwi_ptr(dev, t);
skb->prev = skb->next = NULL;
- dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta,
- &tx_info);
- dma_sync_single_for_device(dev->dev, t->dma_addr, sizeof(t->txwi),
- DMA_TO_DEVICE);
- if (ret < 0)
- goto free;
+ if (dev->drv->tx_aligned4_skbs)
+ mt76_insert_hdr_pad(skb);
- len = skb->len - skb->data_len;
+ len = skb_headlen(skb);
addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
- ret = -ENOMEM;
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto free;
- }
- n = 0;
- buf[n].addr = t->dma_addr;
- buf[n++].len = dev->drv->txwi_size;
- buf[n].addr = addr;
- buf[n++].len = len;
+ tx_info.buf[n].addr = t->dma_addr;
+ tx_info.buf[n++].len = dev->drv->txwi_size;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = len;
skb_walk_frags(skb, iter) {
- if (n == ARRAY_SIZE(buf))
+ if (n == ARRAY_SIZE(tx_info.buf))
goto unmap;
addr = dma_map_single(dev->dev, iter->data, iter->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(dev->dev, addr))
+ if (unlikely(dma_mapping_error(dev->dev, addr)))
goto unmap;
- buf[n].addr = addr;
- buf[n++].len = iter->len;
+ tx_info.buf[n].addr = addr;
+ tx_info.buf[n++].len = iter->len;
}
+ tx_info.nbuf = n;
+
+ dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
+ dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ if (ret < 0)
+ goto unmap;
- if (q->queued + (n + 1) / 2 >= q->ndesc - 1)
+ if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
+ ret = -ENOMEM;
goto unmap;
+ }
- return mt76_dma_add_buf(dev, q, buf, n, tx_info, skb, t);
+ return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
+ tx_info.info, tx_info.skb, t);
unmap:
- ret = -ENOMEM;
for (n--; n > 0; n--)
- dma_unmap_single(dev->dev, buf[n].addr, buf[n].len,
- DMA_TO_DEVICE);
+ dma_unmap_single(dev->dev, tx_info.buf[n].addr,
+ tx_info.buf[n].len, DMA_TO_DEVICE);
free:
- e.skb = skb;
+ e.skb = tx_info.skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, q, &e, true);
+ dev->drv->tx_complete_skb(dev, qid, &e);
mt76_put_txwi(dev, t);
return ret;
}
-EXPORT_SYMBOL_GPL(mt76_dma_tx_queue_skb);
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
@@ -366,7 +386,7 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
break;
addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev->dev, addr)) {
+ if (unlikely(dma_mapping_error(dev->dev, addr))) {
skb_free_frag(buf);
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index e3292df5e9b2..03dd2bafa4e8 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -16,6 +16,8 @@
#ifndef __MT76_DMA_H
#define __MT76_DMA_H
+#define DMA_DUMMY_DATA ((void *)~0)
+
#define MT_RING_SIZE 0x10
#define MT_DMA_CTL_SD_LEN1 GENMASK(13, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index a1529920d877..04964937a3af 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -94,7 +94,7 @@ mt76_eeprom_override(struct mt76_dev *dev)
return;
mac = of_get_mac_address(np);
- if (mac)
+ if (!IS_ERR(mac))
memcpy(dev->macaddr, mac, ETH_ALEN);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 316167404729..5b6a81ee457e 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -214,6 +214,8 @@ mt76_init_sband(struct mt76_dev *dev, struct mt76_sband *msband,
vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
+ IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
(3 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
return 0;
@@ -369,10 +371,16 @@ void mt76_unregister_device(struct mt76_dev *dev)
mt76_tx_status_check(dev, NULL, true);
ieee80211_unregister_hw(hw);
- mt76_tx_free(dev);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
+void mt76_free_device(struct mt76_dev *dev)
+{
+ mt76_tx_free(dev);
+ ieee80211_free_hw(dev->hw);
+}
+EXPORT_SYMBOL_GPL(mt76_free_device);
+
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
{
if (!test_bit(MT76_STATE_RUNNING, &dev->state)) {
@@ -384,17 +392,20 @@ void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_rx);
-static bool mt76_has_tx_pending(struct mt76_dev *dev)
+bool mt76_has_tx_pending(struct mt76_dev *dev)
{
+ struct mt76_queue *q;
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- if (dev->q_tx[i].queued)
+ q = dev->q_tx[i].q;
+ if (q && q->queued)
return true;
}
return false;
}
+EXPORT_SYMBOL_GPL(mt76_has_tx_pending);
void mt76_set_channel(struct mt76_dev *dev)
{
@@ -560,6 +571,7 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt76_wcid *wcid = status->wcid;
bool ps;
+ int i;
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
sta = ieee80211_find_sta_by_ifaddr(dev->hw, hdr->addr2, NULL);
@@ -606,6 +618,20 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
dev->drv->sta_ps(dev, sta, ps);
ieee80211_sta_ps_transition(sta, ps);
+
+ if (ps)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct mt76_txq *mtxq;
+
+ if (!sta->txq[i])
+ continue;
+
+ mtxq = (struct mt76_txq *) sta->txq[i]->drv_priv;
+ if (!skb_queue_empty(&mtxq->retry_q))
+ ieee80211_schedule_txq(dev->hw, sta->txq[i]);
+ }
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
@@ -737,7 +763,7 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_dev *dev = hw->priv;
int n_chains = hweight8(dev->antenna_mask);
- *dbm = dev->txpower_cur / 2;
+ *dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
/* convert from per-chain power to combined
* output on 2x2 devices
@@ -787,3 +813,10 @@ void mt76_csa_check(struct mt76_dev *dev)
__mt76_csa_check, dev);
}
EXPORT_SYMBOL_GPL(mt76_csa_check);
+
+int
+mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_set_tim);
diff --git a/drivers/net/wireless/mediatek/mt76/mmio.c b/drivers/net/wireless/mediatek/mt76/mmio.c
index 1d6bbce76041..38368d19aa6f 100644
--- a/drivers/net/wireless/mediatek/mt76/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mmio.c
@@ -21,7 +21,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
{
u32 val;
- val = ioread32(dev->mmio.regs + offset);
+ val = readl(dev->mmio.regs + offset);
trace_reg_rr(dev, offset, val);
return val;
@@ -30,7 +30,7 @@ static u32 mt76_mmio_rr(struct mt76_dev *dev, u32 offset)
static void mt76_mmio_wr(struct mt76_dev *dev, u32 offset, u32 val)
{
trace_reg_wr(dev, offset, val);
- iowrite32(val, dev->mmio.regs + offset);
+ writel(val, dev->mmio.regs + offset);
}
static u32 mt76_mmio_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
@@ -70,6 +70,19 @@ static int mt76_mmio_rd_rp(struct mt76_dev *dev, u32 base,
return 0;
}
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->mmio.irq_lock, flags);
+ dev->mmio.irqmask &= ~clear;
+ dev->mmio.irqmask |= set;
+ mt76_mmio_wr(dev, addr, dev->mmio.irqmask);
+ spin_unlock_irqrestore(&dev->mmio.irq_lock, flags);
+}
+EXPORT_SYMBOL_GPL(mt76_set_irq_mask);
+
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs)
{
static const struct mt76_bus_ops mt76_mmio_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index bcbfd3c4a44b..8ecbf81a906f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -69,6 +69,7 @@ enum mt76_txq_id {
MT_TXQ_MCU,
MT_TXQ_BEACON,
MT_TXQ_CAB,
+ MT_TXQ_FWDL,
__MT_TXQ_MAX
};
@@ -83,12 +84,11 @@ struct mt76_queue_buf {
int len;
};
-struct mt76u_buf {
- struct mt76_dev *dev;
- struct urb *urb;
- size_t len;
- void *buf;
- bool done;
+struct mt76_tx_info {
+ struct mt76_queue_buf buf[32];
+ struct sk_buff *skb;
+ int nbuf;
+ u32 info;
};
struct mt76_queue_entry {
@@ -98,9 +98,11 @@ struct mt76_queue_entry {
};
union {
struct mt76_txwi_cache *txwi;
- struct mt76u_buf ubuf;
+ struct urb *urb;
};
+ enum mt76_txq_id qid;
bool schedule;
+ bool done;
};
struct mt76_queue_regs {
@@ -117,9 +119,6 @@ struct mt76_queue {
struct mt76_queue_entry *entry;
struct mt76_desc *desc;
- struct list_head swq;
- int swq_queued;
-
u16 first;
u16 head;
u16 tail;
@@ -134,7 +133,13 @@ struct mt76_queue {
dma_addr_t desc_dma;
struct sk_buff *rx_head;
struct page_frag_cache rx_page;
- spinlock_t rx_page_lock;
+};
+
+struct mt76_sw_queue {
+ struct mt76_queue *q;
+
+ struct list_head swq;
+ int swq_queued;
};
struct mt76_mcu_ops {
@@ -150,13 +155,15 @@ struct mt76_mcu_ops {
struct mt76_queue_ops {
int (*init)(struct mt76_dev *dev);
- int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q);
+ int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
+ int idx, int n_desc, int bufsize,
+ u32 ring_base);
int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi);
- int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta);
@@ -183,6 +190,11 @@ enum mt76_wcid_flags {
DECLARE_EWMA(signal, 10, 8);
+#define MT_WCID_TX_INFO_RATE GENMASK(15, 0)
+#define MT_WCID_TX_INFO_NSS GENMASK(17, 16)
+#define MT_WCID_TX_INFO_TXPWR_ADJ GENMASK(25, 18)
+#define MT_WCID_TX_INFO_SET BIT(31)
+
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
@@ -201,18 +213,14 @@ struct mt76_wcid {
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
- __le16 tx_rate;
- bool tx_rate_set;
- u8 tx_rate_nss;
- s8 max_txpwr_adj;
+ u32 tx_info;
bool sw_iv;
u8 packet_id;
};
struct mt76_txq {
- struct list_head list;
- struct mt76_queue *hwq;
+ struct mt76_sw_queue *swq;
struct mt76_wcid *wcid;
struct sk_buff_head retry_q;
@@ -223,11 +231,11 @@ struct mt76_txq {
};
struct mt76_txwi_cache {
- u32 txwi[8];
- dma_addr_t dma_addr;
struct list_head list;
-};
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
struct mt76_rx_tid {
struct rcu_head rcu_head;
@@ -280,18 +288,22 @@ struct mt76_hw_cap {
bool has_5ghz;
};
+#define MT_TXWI_NO_FREE BIT(0)
+
struct mt76_driver_ops {
+ bool tx_aligned4_skbs;
+ u32 txwi_flags;
u16 txwi_size;
void (*update_survey)(struct mt76_dev *dev);
int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid,
- struct ieee80211_sta *sta, u32 *tx_info);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
- void (*tx_complete_skb)(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+ void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
@@ -378,7 +390,6 @@ struct mt76_usb {
u8 data[32];
struct tasklet_struct rx_tasklet;
- struct tasklet_struct tx_tasklet;
struct delayed_work stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
@@ -435,11 +446,14 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_queue q_tx[__MT_TXQ_MAX];
+ struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
+ struct tasklet_struct tx_tasklet;
+ struct delayed_work mac_work;
+
wait_queue_head_t tx_wait;
struct sk_buff_head status_list;
@@ -455,6 +469,10 @@ struct mt76_dev {
u8 antenna_mask;
u16 chainmask;
+ struct tasklet_struct pre_tbtt_tasklet;
+ int beacon_int;
+ u8 beacon_mask;
+
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
struct debugfs_blob_wrapper eeprom;
@@ -529,6 +547,9 @@ struct mt76_rx_status {
#define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
#define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
+#define __mt76_mcu_send_msg(dev, ...) (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
+#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
+#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
@@ -572,6 +593,7 @@ static inline u16 mt76_rev(struct mt76_dev *dev)
#define mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76))
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_kick(dev, ...) (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
@@ -597,6 +619,7 @@ struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
int mt76_register_device(struct mt76_dev *dev, bool vht,
struct ieee80211_rate *rates, int n_rates);
void mt76_unregister_device(struct mt76_dev *dev);
+void mt76_free_device(struct mt76_dev *dev);
struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
@@ -605,6 +628,12 @@ void mt76_seq_puts_array(struct seq_file *file, const char *str,
int mt76_eeprom_init(struct mt76_dev *dev, int len);
void mt76_eeprom_override(struct mt76_dev *dev);
+static inline u8 *
+mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ return (u8 *)t - dev->drv->txwi_size;
+}
+
/* increment with wrap-around */
static inline int mt76_incr(int val, int size)
{
@@ -645,9 +674,19 @@ static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data);
}
-int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta);
+static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
+{
+ int len = ieee80211_get_hdrlen_from_skb(skb);
+
+ if (len % 4 == 0)
+ return;
+
+ skb_push(skb, 2);
+ memmove(skb->data, skb->data + 2, len);
+
+ skb->data[len] = 0;
+ skb->data[len + 1] = 0;
+}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
@@ -657,13 +696,14 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq);
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_dev *dev);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
enum ieee80211_frame_release_type reason,
bool more_data);
+bool mt76_has_tx_pending(struct mt76_dev *dev);
void mt76_set_channel(struct mt76_dev *dev);
int mt76_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
@@ -708,6 +748,8 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
+int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
+
/* internal */
void mt76_tx_free(struct mt76_dev *dev);
struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
@@ -738,8 +780,7 @@ static inline int
mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
int timeout)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = to_usb_device(dev->dev);
struct mt76_usb *usb = &dev->usb;
unsigned int pipe;
@@ -757,10 +798,10 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
const u16 offset, const u32 val);
int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
-int mt76u_submit_rx_buffers(struct mt76_dev *dev);
int mt76u_alloc_queues(struct mt76_dev *dev);
-void mt76u_stop_queues(struct mt76_dev *dev);
-void mt76u_stop_stat_wk(struct mt76_dev *dev);
+void mt76u_stop_tx(struct mt76_dev *dev);
+void mt76u_stop_rx(struct mt76_dev *dev);
+int mt76u_resume_rx(struct mt76_dev *dev);
void mt76u_queues_deinit(struct mt76_dev *dev);
struct sk_buff *
@@ -770,4 +811,6 @@ void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
unsigned long expires);
+void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index 4dcb465095d1..58e68fbdbf75 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -16,21 +16,20 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
struct sk_buff *skb = NULL;
- if (!(dev->beacon_mask & BIT(mvif->idx)))
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_beacon_get(mt76_hw(dev), vif);
if (!skb)
return;
- mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb,
- &mvif->sta.wcid, NULL);
+ mt76_tx_queue_skb(dev, MT_TXQ_BEACON, skb, &mvif->sta.wcid, NULL);
spin_lock_bh(&dev->ps_lock);
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) |
+ dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
@@ -49,7 +48,7 @@ mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- if (!(dev->beacon_mask & BIT(mvif->idx)))
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
return;
skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
@@ -73,10 +72,13 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
struct sk_buff *skb;
int i, nframes;
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = &dev->mt76.q_tx[MT_TXQ_BEACON];
+ q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -93,7 +95,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
if (dev->mt76.csa_complete)
goto out;
- q = &dev->mt76.q_tx[MT_TXQ_CAB];
+ q = dev->mt76.q_tx[MT_TXQ_CAB].q;
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
@@ -118,8 +120,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid,
- NULL);
+ mt76_tx_queue_skb(dev, MT_TXQ_CAB, skb, &mvif->sta.wcid, NULL);
}
mt76_queue_kick(dev, q);
spin_unlock_bh(&q->lock);
@@ -135,7 +136,8 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
+ if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+ hweight8(dev->mt76.beacon_mask))
dev->beacon_check++;
}
@@ -145,19 +147,19 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
if (idx >= 0) {
if (intval)
- dev->beacon_mask |= BIT(idx);
+ dev->mt76.beacon_mask |= BIT(idx);
else
- dev->beacon_mask &= ~BIT(idx);
+ dev->mt76.beacon_mask &= ~BIT(idx);
}
- if (!dev->beacon_mask || (!intval && idx < 0)) {
+ if (!dev->mt76.beacon_mask || (!intval && idx < 0)) {
mt7603_irq_disable(dev, MT_INT_MAC_IRQ3);
mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK);
mt76_wr(dev, MT_HW_INT_MASK(3), 0);
return;
}
- dev->beacon_int = intval;
+ dev->mt76.beacon_int = intval;
mt76_wr(dev, MT_TBTT,
FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE);
@@ -175,10 +177,11 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval)
mt76_set(dev, MT_WF_ARB_BCN_START,
MT_WF_ARB_BCN_START_BSSn(0) |
- ((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1)));
+ ((dev->mt76.beacon_mask >> 1) *
+ MT_WF_ARB_BCN_START_BSS0n(1)));
mt7603_irq_enable(dev, MT_INT_MAC_IRQ3);
- if (dev->beacon_mask & ~BIT(0))
+ if (dev->mt76.beacon_mask & ~BIT(0))
mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
else
mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/core.c b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
index 1086dcd376a0..37e5644b45ef 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/core.c
@@ -2,17 +2,6 @@
#include "mt7603.h"
-void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
- dev->mt76.mmio.irqmask &= ~clear;
- dev->mt76.mmio.irqmask |= set;
- mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
-}
-
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
@@ -38,7 +27,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr);
if (hwintr & MT_HW_INT3_PRE_TBTT0)
- tasklet_schedule(&dev->pre_tbtt_tasklet);
+ tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
@@ -46,7 +35,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_TX_DONE_ALL) {
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->tx_tasklet);
+ tasklet_schedule(&dev->mt76.tx_tasklet);
}
if (intr & MT_INT_RX_DONE(0)) {
@@ -64,8 +53,8 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr)
{
- u32 base = addr & GENMASK(31, 19);
- u32 offset = addr & GENMASK(18, 0);
+ u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index b3ae0aaea62a..27e2d9f90553 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -5,18 +5,22 @@
#include "../dma.h"
static int
-mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
+mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
int idx, int n_desc)
{
- int ret;
+ struct mt76_queue *hwq;
+ int err;
- q->hw_idx = idx;
- q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -119,15 +123,12 @@ static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
- int ret;
+ int err;
- q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
-
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+ MT_RX_RING_BASE);
+ if (err < 0)
+ return err;
mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));
@@ -144,6 +145,8 @@ mt7603_tx_tasklet(unsigned long data)
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
+ mt76_txq_schedule_all(&dev->mt76);
+
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
}
@@ -163,7 +166,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
init_waitqueue_head(&dev->mt76.mmio.mcu.wait);
skb_queue_head_init(&dev->mt76.mmio.mcu.res_q);
- tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
+ tasklet_init(&dev->mt76.tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev);
mt76_clear(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_EN |
@@ -223,6 +226,6 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
- tasklet_kill(&dev->tx_tasklet);
+ tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 3af45949e868..78cdbb70e178 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -167,7 +167,8 @@ mt7603_mac_init(struct mt7603_dev *dev)
FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) |
FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15));
- mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096);
+ mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 4096));
mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13));
mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13));
@@ -488,6 +489,7 @@ mt7603_init_txpower(struct mt7603_dev *dev,
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
chan->max_power = target_power;
+ chan->orig_mpwr = target_power;
}
}
@@ -512,8 +514,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
spin_lock_init(&dev->ps_lock);
- INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
- tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7603_mac_work);
+ tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
/* Check for 7688, which only has 1SS */
@@ -572,9 +574,9 @@ int mt7603_register_device(struct mt7603_dev *dev)
void mt7603_unregister_device(struct mt7603_dev *dev)
{
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76_unregister_device(&dev->mt76);
mt7603_mcu_exit(dev);
mt7603_dma_cleanup(dev);
- ieee80211_free_hw(mt76_hw(dev));
+ mt76_free_device(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 5abc02b57818..6d506e34c3ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -590,7 +590,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
status->aggr = unicast &&
!ieee80211_is_qos_nullfunc(hdr->frame_control);
status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
- status->seqno = hdr->seq_ctrl >> 4;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
return 0;
}
@@ -717,11 +717,11 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
MT_WTBL_UPDATE_RATE_UPDATE |
MT_WTBL_UPDATE_TX_COUNT_CLEAR);
- if (!sta->wcid.tx_rate_set)
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
- sta->wcid.tx_rate_set = true;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
}
static enum mt7603_cipher_type
@@ -783,7 +783,7 @@ int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
+ struct sk_buff *skb, enum mt76_txq_id qid,
struct mt76_wcid *wcid, struct ieee80211_sta *sta,
int pid, struct ieee80211_key_conf *key)
{
@@ -792,6 +792,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_queue *q = dev->mt76.q_tx[qid].q;
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
@@ -806,7 +807,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
if (vif) {
mvif = (struct mt7603_vif *)vif->drv_priv;
vif_idx = mvif->idx;
- if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
+ if (vif_idx && qid >= MT_TXQ_BEACON)
vif_idx += 0x10;
}
@@ -880,7 +881,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
}
/* use maximum tx count for beacons and buffered multicast */
- if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
+ if (qid >= MT_TXQ_BEACON)
tx_count = 0x1f;
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
@@ -911,13 +912,13 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
}
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
int pid;
@@ -933,7 +934,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7603_wtbl_set_ps(dev, msta, false);
}
- pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
spin_lock_bh(&dev->mt76.lock);
@@ -943,7 +944,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
spin_unlock_bh(&dev->mt76.lock);
}
- mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);
+ mt7603_mac_write_txwi(dev, txwi_ptr, tx_info->skb, qid, wcid,
+ sta, pid, key);
return 0;
}
@@ -1142,8 +1144,8 @@ out:
rcu_read_unlock();
}
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct sk_buff *skb = e->skb;
@@ -1153,7 +1155,7 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
return;
}
- if (q - dev->mt76.q_tx < 4)
+ if (qid < 4)
dev->tx_hang_check = 0;
mt76_tx_complete_skb(mdev, skb);
@@ -1266,7 +1268,7 @@ static void mt7603_dma_sched_reset(struct mt7603_dev *dev)
static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
{
- int beacon_int = dev->beacon_int;
+ int beacon_int = dev->mt76.beacon_int;
u32 mask = dev->mt76.mmio.irqmask;
int i;
@@ -1276,8 +1278,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mt76);
- tasklet_disable(&dev->tx_tasklet);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
@@ -1323,10 +1325,10 @@ skip_dma_reset:
clear_bit(MT76_RESET, &dev->mt76.state);
mutex_unlock(&dev->mt76.mutex);
- tasklet_enable(&dev->tx_tasklet);
- tasklet_schedule(&dev->tx_tasklet);
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ tasklet_schedule(&dev->mt76.tx_tasklet);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);
napi_enable(&dev->mt76.napi[0]);
@@ -1385,17 +1387,17 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = &dev->mt76.q_tx[i];
+ q = dev->mt76.q_tx[i].q;
if (!q->queued)
continue;
prev_dma_idx = dev->tx_dma_idx[i];
- dma_idx = ioread32(&q->regs->dma_idx);
+ dma_idx = readl(&q->regs->dma_idx);
dev->tx_dma_idx[i] = dma_idx;
if (dma_idx == prev_dma_idx &&
- dma_idx != ioread32(&q->regs->cpu_idx))
+ dma_idx != readl(&q->regs->cpu_idx))
break;
}
@@ -1666,7 +1668,7 @@ out:
void mt7603_mac_work(struct work_struct *work)
{
struct mt7603_dev *dev = container_of(work, struct mt7603_dev,
- mac_work.work);
+ mt76.mac_work.work);
bool reset = false;
mt76_tx_status_check(&dev->mt76, NULL, false);
@@ -1719,6 +1721,6 @@ void mt7603_mac_work(struct work_struct *work)
if (reset)
mt7603_mac_watchdog_reset(dev);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
msecs_to_jiffies(MT7603_WATCHDOG_TIME));
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index a3c4ef198bfe..0a0334dc40d5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -16,7 +16,7 @@ mt7603_start(struct ieee80211_hw *hw)
mt7603_mac_start(dev);
dev->survey_time = ktime_get_boottime();
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- mt7603_mac_work(&dev->mac_work.work);
+ mt7603_mac_work(&dev->mt76.mac_work.work);
return 0;
}
@@ -27,7 +27,7 @@ mt7603_stop(struct ieee80211_hw *hw)
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
mt7603_mac_stop(dev);
}
@@ -132,11 +132,13 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
u8 bw = MT_BW_20;
bool failed = false;
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
+ mt7603_beacon_set_timer(dev, -1, 0);
mt76_set_channel(&dev->mt76);
mt7603_mac_stop(dev);
@@ -171,7 +173,7 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt76_txq_schedule_all(&dev->mt76);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT7603_WATCHDOG_TIME);
/* reset channel stats */
@@ -186,10 +188,14 @@ mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
mt7603_init_edcca(dev);
out:
+ if (!(mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ mt7603_beacon_set_timer(dev, -1, dev->mt76.beacon_int);
mutex_unlock(&dev->mt76.mutex);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
if (failed)
- mt7603_mac_work(&dev->mac_work.work);
+ mt7603_mac_work(&dev->mt76.mac_work.work);
return ret;
}
@@ -294,9 +300,9 @@ mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
int beacon_int = !!info->enable_beacon * info->beacon_int;
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
}
mutex_unlock(&dev->mt76.mutex);
@@ -492,7 +498,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue].hw_idx;
+ queue = dev->mt76.q_tx[queue].q->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
@@ -535,7 +541,6 @@ mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt7603_dev *dev = hw->priv;
set_bit(MT76_SCANNING, &dev->mt76.state);
- mt7603_beacon_set_timer(dev, -1, 0);
}
static void
@@ -544,7 +549,6 @@ mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct mt7603_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
- mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
}
static void
@@ -593,7 +597,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
break;
case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -664,12 +668,6 @@ static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *cont
mt76_tx(&dev->mt76, control->sta, wcid, skb);
}
-static int
-mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
-{
- return 0;
-}
-
const struct ieee80211_ops mt7603_ops = {
.tx = mt7603_tx,
.start = mt7603_start,
@@ -691,7 +689,7 @@ const struct ieee80211_ops mt7603_ops = {
.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
.release_buffered_frames = mt7603_release_buffered_frames,
.set_coverage_class = mt7603_set_coverage_class,
- .set_tim = mt7603_set_tim,
+ .set_tim = mt76_set_tim,
.get_survey = mt76_get_survey,
};
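
A note on the IEEE80211_AMPDU_TX_START hunk above: the open-coded *ssn << 4 is replaced with the mac80211 helper, which also masks the shifted value into the sequence-number field. A minimal equivalence sketch, assuming the standard definitions from include/linux/ieee80211.h:

    /* IEEE80211_SCTL_SEQ is 0xFFF0 and IEEE80211_SN_TO_SEQ(ssn) expands to
     * (((ssn) << 4) & IEEE80211_SCTL_SEQ), so for any in-range 12-bit
     * starting sequence number the two forms agree, and the helper
     * additionally clips out-of-range input.
     */
    u16 ssn = 0x123;
    u16 old_form = ssn << 4;                  /* 0x1230 */
    u16 new_form = IEEE80211_SN_TO_SEQ(ssn);  /* 0x1230 as well */
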
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index d06905ea8cc6..6357b5658a32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -14,17 +14,14 @@ struct mt7603_fw_trailer {
} __packed;
static int
-__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
- int query, int *wait_seq)
+__mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
{
int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12;
struct mt76_dev *mdev = &dev->mt76;
struct mt7603_mcu_txd *txd;
u8 seq;
- if (!skb)
- return -EINVAL;
-
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
if (!seq)
seq = ++mdev->mmio.mcu.msg_seq & 0xf;
@@ -42,15 +39,14 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
if (cmd < 0) {
txd->cid = -cmd;
+ txd->set_query = MCU_Q_NA;
} else {
txd->cid = MCU_CMD_EXT_CID;
txd->ext_cid = cmd;
- if (query != MCU_Q_NA)
- txd->ext_cid_ack = 1;
+ txd->set_query = MCU_Q_SET;
+ txd->ext_cid_ack = 1;
}
- txd->set_query = query;
-
if (wait_seq)
*wait_seq = seq;
@@ -58,21 +54,26 @@ __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
}
static int
-mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd,
- int query)
+mt7603_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data,
+ int len, bool wait_resp)
{
- struct mt76_dev *mdev = &dev->mt76;
+ struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
unsigned long expires = jiffies + 3 * HZ;
struct mt7603_mcu_rxd *rxd;
+ struct sk_buff *skb;
int ret, seq;
+ skb = mt7603_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
mutex_lock(&mdev->mmio.mcu.mutex);
- ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq);
+ ret = __mt7603_mcu_msg_send(dev, skb, cmd, &seq);
if (ret)
goto out;
- while (1) {
+ while (wait_resp) {
bool check_seq = false;
skb = mt76_mcu_get_response(&dev->mt76, expires);
@@ -113,28 +114,22 @@ mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len)
.len = cpu_to_le32(len),
.mode = cpu_to_le32(BIT(31)),
};
- struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
- MCU_Q_NA);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ &req, sizeof(req), true);
}
static int
mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len)
{
- struct sk_buff *skb;
- int ret = 0;
+ int cur_len, ret = 0;
while (len > 0) {
- int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
- len);
-
- skb = mt7603_mcu_msg_alloc(data, cur_len);
- if (!skb)
- return -ENOMEM;
+ cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd),
+ len);
- ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
- MCU_Q_NA, NULL);
+ ret = __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_SCATTER,
+ data, cur_len, false);
if (ret)
break;
@@ -155,23 +150,19 @@ mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr)
.override = cpu_to_le32(addr ? 1 : 0),
.addr = cpu_to_le32(addr),
};
- struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
- MCU_Q_NA);
+ return __mt76_mcu_send_msg(&dev->mt76, -MCU_CMD_FW_START_REQ,
+ &req, sizeof(req), true);
}
static int
-mt7603_mcu_restart(struct mt7603_dev *dev)
+mt7603_mcu_restart(struct mt76_dev *dev)
{
- struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0);
-
- return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
- MCU_Q_NA);
+ return __mt76_mcu_send_msg(dev, -MCU_CMD_RESTART_DL_REQ,
+ NULL, 0, true);
}
-static int
-mt7603_load_firmware(struct mt7603_dev *dev)
+static int mt7603_load_firmware(struct mt7603_dev *dev)
{
const struct firmware *fw;
const struct mt7603_fw_trailer *hdr;
@@ -261,6 +252,9 @@ running:
mt76_clear(dev, MT_SCH_4, BIT(8));
dev->mcu_running = true;
+ snprintf(dev->mt76.hw->wiphy->fw_version,
+ sizeof(dev->mt76.hw->wiphy->fw_version),
+ "%.10s-%.15s", hdr->fw_ver, hdr->build_date);
dev_info(dev->mt76.dev, "firmware init done\n");
out:
@@ -271,14 +265,18 @@ out:
int mt7603_mcu_init(struct mt7603_dev *dev)
{
- mutex_init(&dev->mt76.mmio.mcu.mutex);
+ static const struct mt76_mcu_ops mt7603_mcu_ops = {
+ .mcu_send_msg = mt7603_mcu_msg_send,
+ .mcu_restart = mt7603_mcu_restart,
+ };
+ dev->mt76.mcu_ops = &mt7603_mcu_ops;
return mt7603_load_firmware(dev);
}
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
- mt7603_mcu_restart(dev);
+ __mt76_mcu_restart(&dev->mt76);
skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
}
@@ -360,27 +358,30 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
.buffer_mode = 1,
.len = ARRAY_SIZE(req_fields) - 1,
};
- struct sk_buff *skb;
- struct req_data *data;
const int size = 0xff * sizeof(struct req_data);
- u8 *eep = (u8 *)dev->mt76.eeprom.data;
- int i;
+ u8 *req, *eep = (u8 *)dev->mt76.eeprom.data;
+ int i, ret, len = sizeof(req_hdr) + size;
+ struct req_data *data;
BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size);
- skb = mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
- memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
- data = (struct req_data *)skb_put(skb, size);
- memset(data, 0, size);
+ req = kmalloc(len, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memcpy(req, &req_hdr, sizeof(req_hdr));
+ data = (struct req_data *)(req + sizeof(req_hdr));
+ memset(data, 0, size);
for (i = 0; i < ARRAY_SIZE(req_fields); i++) {
data[i].addr = cpu_to_le16(req_fields[i]);
data[i].val = eep[req_fields[i]];
- data[i].pad = 0;
}
- return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
- MCU_Q_SET);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ req, len, true);
+ kfree(req);
+
+ return ret;
}
static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
@@ -415,7 +416,6 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
},
#undef EEP_VAL
};
- struct sk_buff *skb;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK,
@@ -424,9 +424,8 @@ static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev)
memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7,
sizeof(req.temp_comp_power));
- skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL,
- MCU_Q_SET);
+ return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_TX_POWER_CTRL,
+ &req, sizeof(req), true);
}
int mt7603_mcu_set_channel(struct mt7603_dev *dev)
@@ -450,10 +449,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
.tx_streams = n_chains,
.rx_streams = n_chains,
};
- struct sk_buff *skb;
s8 tx_power;
- int ret;
- int i;
+ int i, ret;
if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) {
req.bw = MT_BW_40;
@@ -473,9 +470,8 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
for (i = 0; i < ARRAY_SIZE(req.txpower); i++)
req.txpower[i] = tx_power;
- skb = mt7603_mcu_msg_alloc(&req, sizeof(req));
- ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
- MCU_Q_SET);
+ ret = __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CHANNEL_SWITCH,
+ &req, sizeof(req), true);
if (ret)
return ret;
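
The conversion above registers mt7603_mcu_ops and routes every command through __mt76_mcu_send_msg()/__mt76_mcu_restart(). A hypothetical sketch of the dispatch those helpers perform (the real ones live in mt76.h and may well be macros; this only illustrates the ops-table indirection):

    static inline int
    __mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data,
                        int len, bool wait_resp)
    {
            /* forward to the per-chip callback installed in mt7603_mcu_init() */
            return dev->mcu_ops->mcu_send_msg(dev, cmd, data, len, wait_resp);
    }

__mt76_mcu_restart() is assumed to follow the same pattern via the mcu_restart callback.
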
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 6049f3b7c8fe..fa64bbaab0d2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -109,7 +109,6 @@ struct mt7603_dev {
ktime_t survey_time;
ktime_t ed_time;
- int beacon_int;
struct mt76_queue q_rx;
@@ -126,8 +125,6 @@ struct mt7603_dev {
s8 sensitivity;
- u8 beacon_mask;
-
u8 beacon_check;
u8 tx_hang_check;
u8 tx_dma_check;
@@ -143,10 +140,6 @@ struct mt7603_dev {
u32 reset_test;
unsigned int reset_cause[__RESET_CAUSE_MAX];
-
- struct delayed_work mac_work;
- struct tasklet_struct tx_tasklet;
- struct tasklet_struct pre_tbtt_tasklet;
};
extern const struct mt76_driver_ops mt7603_drv_ops;
@@ -179,16 +172,14 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev);
int mt7603_mcu_init(struct mt7603_dev *dev);
void mt7603_init_debugfs(struct mt7603_dev *dev);
-void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);
-
static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
{
- mt7603_set_irq_mask(dev, 0, mask);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}
static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
{
- mt7603_set_irq_mask(dev, mask, 0);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
void mt7603_mac_dma_start(struct mt7603_dev *dev);
@@ -225,12 +216,12 @@ void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
index da6827ae6cee..9d257d5c309d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/regs.h
@@ -233,6 +233,10 @@
#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 0)
+#define MT_DMA_DCR0_DAMSDU BIT(16)
+#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+
#define MT_DMA_DCR1 MT_WF_DMA(0x004)
#define MT_DMA_FQCR0 MT_WF_DMA(0x008)
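
The MT_DMA_DCR0 fields added above are consumed by mt7615_mac_init() further down in this series. A worked example of the register value that write produces, assuming only the usual GENMASK()/BIT()/FIELD_PREP() semantics from linux/bits.h and linux/bitfield.h:

    /* MT_DMA_DCR0_MAX_RX_LEN  = GENMASK(15, 0) -> mask 0x0000ffff
     * MT_DMA_DCR0_RX_VEC_DROP = BIT(17)        -> mask 0x00020000
     * FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072) == 0x00000c00
     *
     * so the write in mt7615_mac_init(),
     *   mt76_wr(dev, MT_DMA_DCR0,
     *           MT_DMA_DCR0_RX_VEC_DROP |
     *           FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
     * programs MT_DMA_DCR0 to 0x00020c00.
     */
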
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
new file mode 100644
index 000000000000..3b8aba09bd5e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
@@ -0,0 +1,7 @@
+config MT7615E
+ tristate "MediaTek MT7615E (PCIe) support"
+ select MT76_CORE
+ depends on MAC80211
+ depends on PCI
+ help
+ This adds support for MT7615-based wireless PCIe devices.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
new file mode 100644
index 000000000000..6397552f6ee3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7615E) += mt7615e.o
+
+mt7615e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
new file mode 100644
index 000000000000..3ec6582afd8f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7615_init_tx_queues(struct mt7615_dev *dev, int n_desc)
+{
+ struct mt76_sw_queue *q;
+ struct mt76_queue *hwq;
+ int err, i;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, 0, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < MT_TXQ_MCU; i++) {
+ q = &dev->mt76.q_tx[i];
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+ }
+
+ return 0;
+}
+
+static int
+mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
+ int idx, int n_desc)
+{
+ struct mt76_queue *hwq;
+ int err;
+
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
+
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
+
+ return 0;
+}
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ switch (type) {
+ case PKT_TYPE_TXS:
+ for (rxd++; rxd + 7 <= end; rxd += 7)
+ mt7615_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7615_mac_tx_free(dev, skb);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt76_mcu_rx_event(&dev->mt76, skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7615_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ /* fall through */
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+static void mt7615_tx_tasklet(unsigned long data)
+{
+ struct mt7615_dev *dev = (struct mt7615_dev *)data;
+ static const u8 queue_map[] = {
+ MT_TXQ_MCU,
+ MT_TXQ_BE
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++)
+ mt76_queue_tx_cleanup(dev, queue_map[i], false);
+
+ mt76_txq_schedule_all(&dev->mt76);
+
+ mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
+}
+
+int mt7615_dma_init(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_dma_attach(&dev->mt76);
+
+ tasklet_init(&dev->mt76.tx_tasklet, mt7615_tx_tasklet,
+ (unsigned long)dev);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE |
+ MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN |
+ MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY |
+ MT_WPDMA_GLO_CFG_OMIT_TX_INFO);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0, 0x1);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21, 0x1);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
+
+ mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_MULTI_DMA_EN, 0x3);
+
+ mt76_wr(dev, MT_WPDMA_GLO_CFG1, 0x1);
+ mt76_wr(dev, MT_WPDMA_TX_PRE_CFG, 0xf0000);
+ mt76_wr(dev, MT_WPDMA_RX_PRE_CFG, 0xf7f0000);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG, 0x4000026);
+ mt76_wr(dev, MT_WPDMA_ABT_CFG1, 0x18811881);
+ mt76_set(dev, 0x7158, BIT(16));
+ mt76_clear(dev, 0x7000, BIT(23));
+ mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+ ret = mt7615_init_tx_queues(dev, MT7615_TX_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ MT7615_TXQ_MCU,
+ MT7615_TX_MCU_RING_SIZE);
+ if (ret)
+ return ret;
+
+ ret = mt7615_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ MT7615_TXQ_FWDL,
+ MT7615_TX_FWDL_RING_SIZE);
+ if (ret)
+ return ret;
+
+ /* init rx queues */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
+ MT7615_RX_MCU_RING_SIZE, MT_RX_BUF_SIZE,
+ MT_RX_RING_BASE);
+ if (ret)
+ return ret;
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
+ MT7615_RX_RING_SIZE, MT_RX_BUF_SIZE,
+ MT_RX_RING_BASE);
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_DELAY_INT_CFG, 0);
+
+ ret = mt76_init_queues(dev);
+ if (ret < 0)
+ return ret;
+
+ mt76_poll(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
+ MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);
+
+ /* start dma engine */
+ mt76_set(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+
+ /* enable interrupts for TX/RX rings */
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL);
+
+ return 0;
+}
+
+void mt7615_dma_cleanup(struct mt7615_dev *dev)
+{
+ mt76_clear(dev, MT_WPDMA_GLO_CFG,
+ MT_WPDMA_GLO_CFG_TX_DMA_EN |
+ MT_WPDMA_GLO_CFG_RX_DMA_EN);
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
+
+ tasklet_kill(&dev->mt76.tx_tasklet);
+ mt76_dma_cleanup(&dev->mt76);
+}
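
mt7615_dma_init() above updates individual WPDMA_GLO_CFG bitfields through mt76_rmw_field(). A sketch of what one such call is assumed to be equivalent to (a read-modify-write limited to one field; the actual helper is defined in mt76.h):

    u32 val;

    /* roughly what
     *   mt76_rmw_field(dev, MT_WPDMA_GLO_CFG,
     *                  MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
     * boils down to:
     */
    val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
    val &= ~MT_WPDMA_GLO_CFG_DMA_BURST_SIZE;
    val |= FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 0x3);
    mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
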
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
new file mode 100644
index 000000000000..dd5ab46a4f66
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include "mt7615.h"
+#include "eeprom.h"
+
+static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
+ u16 addr, u8 *data)
+{
+ u32 val;
+ int i;
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE);
+ val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf);
+ val |= MT_EFUSE_CTRL_KICK;
+ mt76_wr(dev, base + MT_EFUSE_CTRL, val);
+
+ if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000))
+ return -ETIMEDOUT;
+
+ udelay(2);
+
+ val = mt76_rr(dev, base + MT_EFUSE_CTRL);
+ if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT ||
+ WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) {
+ memset(data, 0x0, 16);
+ return 0;
+ }
+
+ for (i = 0; i < 4; i++) {
+ val = mt76_rr(dev, base + MT_EFUSE_RDATA(i));
+ put_unaligned_le32(val, data + 4 * i);
+ }
+
+ return 0;
+}
+
+static int mt7615_efuse_init(struct mt7615_dev *dev)
+{
+ u32 base = mt7615_reg_map(dev, MT_EFUSE_BASE);
+ int len = MT7615_EEPROM_SIZE;
+ int ret, i;
+ void *buf;
+
+ if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
+ return -EINVAL;
+
+ dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
+ dev->mt76.otp.size = len;
+ if (!dev->mt76.otp.data)
+ return -ENOMEM;
+
+ buf = dev->mt76.otp.data;
+ for (i = 0; i + 16 <= len; i += 16) {
+ ret = mt7615_efuse_read(dev, base, i, buf + i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mt7615_eeprom_load(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return mt7615_efuse_init(dev);
+}
+
+int mt7615_eeprom_init(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_eeprom_load(dev);
+ if (ret < 0)
+ return ret;
+
+ memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE);
+
+ dev->mt76.cap.has_2ghz = true;
+ dev->mt76.cap.has_5ghz = true;
+
+ memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mt76);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
new file mode 100644
index 000000000000..a4cf16688171
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_EEPROM_H
+#define __MT7615_EEPROM_H
+
+#include "mt7615.h"
+
+enum mt7615_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_NIC_CONF_0 = 0x034,
+
+ __MT_EE_MAX = 0x3bf
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
new file mode 100644
index 000000000000..3ab3ff553ef2
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/etherdevice.h>
+#include "mt7615.h"
+#include "mac.h"
+
+static void mt7615_phy_init(struct mt7615_dev *dev)
+{
+ /* disable band 0 rf low power beacon mode */
+ mt76_rmw(dev, MT_WF_PHY_WF2_RFCTRL0, MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN,
+ MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN);
+}
+
+static void mt7615_mac_init(struct mt7615_dev *dev)
+{
+ /* enable band 0 clk */
+ mt76_rmw(dev, MT_CFG_CCR,
+ MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN,
+ MT_CFG_CCR_MAC_D0_1X_GC_EN | MT_CFG_CCR_MAC_D0_2X_GC_EN);
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_rmw_field(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_DENSITY, 0x3);
+ mt76_rmw(dev, MT_TMAC_CTCR0,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN,
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mt7615_mcu_set_rts_thresh(dev, 0x92b);
+
+ mt76_rmw(dev, MT_AGG_SCR, MT_AGG_SCR_NLNAV_MID_PTEC_DIS,
+ MT_AGG_SCR_NLNAV_MID_PTEC_DIS);
+
+ mt7615_mcu_init_mac(dev);
+
+ mt76_wr(dev, MT_DMA_DCR0, MT_DMA_DCR0_RX_VEC_DROP |
+ FIELD_PREP(MT_DMA_DCR0_MAX_RX_LEN, 3072));
+
+ mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
+ mt76_wr(dev, MT_AGG_ARDCR,
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
+ max_t(int, 0, MT7615_RATE_RETRY - 2)) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7615_RATE_RETRY - 1) |
+ FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7615_RATE_RETRY - 1));
+
+ mt76_wr(dev, MT_AGG_ARCR,
+ (MT_AGG_ARCR_INIT_RATE1 |
+ FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) |
+ MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
+ FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
+ FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
+
+ dev->mt76.global_wcid.idx = MT7615_WTBL_RESERVED;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ rcu_assign_pointer(dev->mt76.wcid[MT7615_WTBL_RESERVED],
+ &dev->mt76.global_wcid);
+}
+
+static int mt7615_init_hardware(struct mt7615_dev *dev)
+{
+ int ret;
+
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ spin_lock_init(&dev->token_lock);
+ idr_init(&dev->token);
+
+ ret = mt7615_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mt7615_dma_init(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
+
+ ret = mt7615_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ mt7615_mcu_set_eeprom(dev);
+ mt7615_mac_init(dev);
+ mt7615_phy_init(dev);
+ mt7615_mcu_ctrl_pm_state(dev, 0);
+ mt7615_mcu_del_wtbl_all(dev);
+
+ return 0;
+}
+
+#define CCK_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \
+ .hw_value = (MT_PHY_TYPE_CCK << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + (_idx)), \
+}
+
+#define OFDM_RATE(_idx, _rate) { \
+ .bitrate = _rate, \
+ .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \
+}
+
+static struct ieee80211_rate mt7615_rates[] = {
+ CCK_RATE(0, 10),
+ CCK_RATE(1, 20),
+ CCK_RATE(2, 55),
+ CCK_RATE(3, 110),
+ OFDM_RATE(11, 60),
+ OFDM_RATE(15, 90),
+ OFDM_RATE(10, 120),
+ OFDM_RATE(14, 180),
+ OFDM_RATE(9, 240),
+ OFDM_RATE(13, 360),
+ OFDM_RATE(8, 480),
+ OFDM_RATE(12, 540),
+};
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = MT7615_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ }
+};
+
+static int mt7615_init_debugfs(struct mt7615_dev *dev)
+{
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs(&dev->mt76);
+ if (!dir)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int mt7615_register_device(struct mt7615_dev *dev)
+{
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct wiphy *wiphy = hw->wiphy;
+ int ret;
+
+ ret = mt7615_init_hardware(dev);
+ if (ret)
+ return ret;
+
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt7615_mac_work);
+
+ hw->queues = 4;
+ hw->max_rates = 3;
+ hw->max_report_rates = 7;
+ hw->max_rate_tries = 11;
+
+ hw->sta_data_size = sizeof(struct mt7615_sta);
+ hw->vif_data_size = sizeof(struct mt7615_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);
+
+ dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ dev->mt76.sband_5g.sband.vht_cap.cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
+ dev->mt76.chainmask = 0x404;
+ dev->mt76.antenna_mask = 0xf;
+
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP);
+
+ ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
+ ARRAY_SIZE(mt7615_rates));
+ if (ret)
+ return ret;
+
+ hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
+
+ return mt7615_init_debugfs(dev);
+}
+
+void mt7615_unregister_device(struct mt7615_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt7615_txp_skb_unmap(&dev->mt76, txwi);
+ if (txwi->skb)
+ dev_kfree_skb_any(txwi->skb);
+ mt76_put_txwi(&dev->mt76, txwi);
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+ mt76_unregister_device(&dev->mt76);
+ mt7615_mcu_exit(dev);
+ mt7615_dma_cleanup(dev);
+
+ ieee80211_free_hw(mt76_hw(dev));
+}
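
mt7615_init_hardware() sets up dev->token as an idr and mt7615_unregister_device() drains whatever is still mapped in it; the matching idr_alloc() sits in mt7615_tx_prepare_skb() in mac.c below. A minimal, generic sketch of the idr calls this bookkeeping relies on (standard <linux/idr.h> API, nothing mt7615-specific):

    struct idr tokens;
    int payload;                 /* hypothetical object tracked by an id */
    int id;

    idr_init(&tokens);
    id = idr_alloc(&tokens, &payload, 0, 128, GFP_KERNEL); /* 0 <= id < 128 */
    if (id >= 0)
            idr_remove(&tokens, id);     /* returns &payload on success */
    idr_destroy(&tokens);
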
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
new file mode 100644
index 000000000000..b8f48d10f27a
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -0,0 +1,775 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Roy Luo <royluo@google.com>
+ * Felix Fietkau <nbd@nbd.name>
+ * Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "mt7615.h"
+#include "../dma.h"
+#include "mac.h"
+
+static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
+ u8 idx, bool unicast)
+{
+ struct mt7615_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7615_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+static int mt7615_get_rate(struct mt7615_dev *dev,
+ struct ieee80211_supported_band *sband,
+ int idx, bool cck)
+{
+ int offset = 0;
+ int len = sband->n_bitrates;
+ int i;
+
+ if (cck) {
+ if (sband == &dev->mt76.sband_5g.sband)
+ return 0;
+
+ idx &= ~BIT(2); /* short preamble */
+ } else if (sband == &dev->mt76.sband_2g.sband) {
+ offset = 4;
+ }
+
+ for (i = offset; i < len; i++) {
+ if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
+ return i;
+ }
+
+ return 0;
+}
+
+static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
+ u8 *pn = status->iv;
+ u8 *hdr;
+
+ __skb_push(skb, 8);
+ memmove(skb->data, skb->data + 8, hdr_len);
+ hdr = skb->data + hdr_len;
+
+ hdr[0] = pn[5];
+ hdr[1] = pn[4];
+ hdr[2] = 0;
+ hdr[3] = 0x20 | (key_id << 6);
+ hdr[4] = pn[3];
+ hdr[5] = pn[2];
+ hdr[6] = pn[1];
+ hdr[7] = pn[0];
+
+ status->flag &= ~RX_FLAG_IV_STRIPPED;
+}
+
+int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_hdr *hdr;
+ __le32 *rxd = (__le32 *)skb->data;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ bool unicast, remove_pad, insert_ccmp_hdr = false;
+ int i, idx;
+
+ memset(status, 0, sizeof(*status));
+
+ unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
+ status->wcid = mt7615_rx_get_wcid(dev, idx, unicast);
+
+ /* TODO: properly support DBDC */
+ status->freq = dev->mt76.chandef.chan->center_freq;
+ status->band = dev->mt76.chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &dev->mt76.sband_5g.sband;
+ else
+ sband = &dev->mt76.sband_2g.sband;
+
+ if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
+ !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ if (!sband->channels)
+ return -EINVAL;
+
+ rxd += 4;
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+
+ insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
+ u32 rxdg0 = le32_to_cpu(rxd[0]);
+ u32 rxdg1 = le32_to_cpu(rxd[1]);
+ u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
+ bool cck = false;
+
+ i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
+ switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ i = mt7615_get_rate(dev, sband, i, cck);
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ status->encoding = RX_ENC_HT;
+ if (i > 31)
+ return -EINVAL;
+ break;
+ case MT_PHY_TYPE_VHT:
+ status->nss = FIELD_GET(MT_RXV2_NSTS, rxdg1) + 1;
+ status->encoding = RX_ENC_VHT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ status->rate_idx = i;
+
+ switch (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0)) {
+ case MT_PHY_BW_20:
+ break;
+ case MT_PHY_BW_40:
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case MT_PHY_BW_80:
+ status->bw = RATE_INFO_BW_80;
+ break;
+ case MT_PHY_BW_160:
+ status->bw = RATE_INFO_BW_160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rxdg0 & MT_RXV1_HT_SHORT_GI)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (rxdg0 & MT_RXV1_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
+
+ /* TODO: RSSI */
+ rxd += 6;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt7615_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control);
+ status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ status->seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ return 0;
+}
+
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
+{
+}
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7615_dev *dev;
+ struct mt7615_txp *txp;
+ u8 *txwi_ptr;
+
+ txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+ txp = (struct mt7615_txp *)(txwi_ptr + MT_TXD_SIZE);
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ spin_lock_bh(&dev->token_lock);
+ t = idr_remove(&dev->token, le16_to_cpu(txp->token));
+ spin_unlock_bh(&dev->token_lock);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->skb);
+}
+
+u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ const struct ieee80211_tx_rate *rate,
+ bool stbc, u8 *bw)
+{
+ u8 phy, nss, rate_idx;
+ u16 rateval;
+
+ *bw = 0;
+
+ if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
+ rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ nss = ieee80211_rate_get_vht_nss(rate);
+ phy = MT_PHY_TYPE_VHT;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ *bw = 2;
+ else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ *bw = 3;
+ } else if (rate->flags & IEEE80211_TX_RC_MCS) {
+ rate_idx = rate->idx;
+ nss = 1 + (rate->idx >> 3);
+ phy = MT_PHY_TYPE_HT;
+ if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
+ phy = MT_PHY_TYPE_HT_GF;
+ if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *bw = 1;
+ } else {
+ const struct ieee80211_rate *r;
+ int band = dev->mt76.chandef.chan->band;
+ u16 val;
+
+ nss = 1;
+ r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
+ if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
+ val = r->hw_value_short;
+ else
+ val = r->hw_value;
+
+ phy = val >> 8;
+ rate_idx = val & 0xff;
+ }
+
+ rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+ FIELD_PREP(MT_TX_RATE_MODE, phy) |
+ FIELD_PREP(MT_TX_RATE_NSS, nss - 1));
+
+ if (stbc && nss == 1)
+ rateval |= MT_TX_RATE_STBC;
+
+ return rateval;
+}
+
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int pid,
+ struct ieee80211_key_conf *key)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_rate *rate = &info->control.rates[0];
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_vif *vif = info->control.vif;
+ int tx_count = 8;
+ u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0;
+ __le16 fc = hdr->frame_control;
+ u16 seqno = 0;
+ u32 val;
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ omac_idx = mvif->omac_idx;
+ }
+
+ if (sta) {
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ tx_count = msta->rate_count;
+ }
+
+ fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
+ fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+
+ if (ieee80211_is_data(fc)) {
+ q_idx = skb_get_queue_mapping(skb);
+ p_fmt = MT_TX_TYPE_CT;
+ } else if (ieee80211_is_beacon(fc)) {
+ q_idx = MT_LMAC_BCN0;
+ p_fmt = MT_TX_TYPE_FW;
+ } else {
+ q_idx = MT_LMAC_ALTX0;
+ p_fmt = MT_TX_TYPE_CT;
+ }
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_LMAC) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
+ FIELD_PREP(MT_TXD1_HDR_INFO,
+ ieee80211_get_hdrlen_from_skb(skb) / 2) |
+ FIELD_PREP(MT_TXD1_TID,
+ skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_PKT_FMT, p_fmt) |
+ FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
+ txwi[1] = cpu_to_le32(val);
+
+ val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD2_MULTICAST,
+ is_multicast_ether_addr(hdr->addr1));
+ txwi[2] = cpu_to_le32(val);
+
+ if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ txwi[4] = 0;
+ txwi[6] = 0;
+
+ if (rate->idx >= 0 && rate->count &&
+ !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
+ bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
+ u8 bw;
+ u16 rateval = mt7615_mac_tx_rate_val(dev, rate, stbc, &bw);
+
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, bw) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rateval);
+ txwi[6] |= cpu_to_le32(val);
+
+ if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
+ txwi[6] |= cpu_to_le32(MT_TXD6_SGI);
+
+ if (info->flags & IEEE80211_TX_CTL_LDPC)
+ txwi[6] |= cpu_to_le32(MT_TXD6_LDPC);
+
+ if (!(rate->flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS)))
+ txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);
+
+ tx_count = rate->count;
+ }
+
+ if (!ieee80211_is_beacon(fc)) {
+ val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
+ FIELD_PREP(MT_TXD5_PID, pid);
+ txwi[5] = cpu_to_le32(val);
+ } else {
+ txwi[5] = 0;
+ /* use maximum tx count for beacons */
+ tx_count = 0x1f;
+ }
+
+ val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ val |= MT_TXD3_SN_VALID;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
+
+ seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
+ val |= MT_TXD3_SN_VALID;
+ }
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
+
+ txwi[3] = cpu_to_le32(val);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
+
+ if (key)
+ txwi[3] |= cpu_to_le32(MT_TXD3_PROTECT_FRAME);
+
+ txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+
+ return 0;
+}
+
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *t)
+{
+ struct mt7615_txp *txp;
+ u8 *txwi;
+ int i;
+
+ txwi = mt76_get_txwi_ptr(dev, t);
+ txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 1; i < txp->nbuf; i++)
+ dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
+ le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
+}
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ int i, pid, id, nbuf = tx_info->nbuf - 1;
+ u8 *txwi = (u8 *)txwi_ptr;
+ struct mt76_txwi_cache *t;
+ struct mt7615_txp *txp;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ msta->rate_probe = true;
+ mt7615_mcu_set_rates(dev, msta, &info->control.rates[0],
+ msta->rates);
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ mt7615_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, sta,
+ pid, key);
+
+ txp = (struct mt7615_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->idx;
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ spin_lock_bh(&dev->token_lock);
+ id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
+ spin_unlock_bh(&dev->token_lock);
+ if (id < 0)
+ return id;
+
+ txp->token = cpu_to_le16(id);
+ txp->rept_wds_wcid = 0xff;
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
+
+static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_info *info, __le32 *txs_data)
+{
+ struct ieee80211_supported_band *sband;
+ int i, idx, count, final_idx = 0;
+ bool fixed_rate, final_mpdu, ack_timeout;
+ bool probe, ampdu, cck = false;
+ u32 final_rate, final_rate_flags, final_nss, txs;
+ u8 pid;
+
+ fixed_rate = info->status.rates[0].count;
+ probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+
+ txs = le32_to_cpu(txs_data[1]);
+ final_mpdu = txs & MT_TXS1_ACKED_MPDU;
+ ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
+
+ txs = le32_to_cpu(txs_data[3]);
+ count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
+
+ txs = le32_to_cpu(txs_data[0]);
+ pid = FIELD_GET(MT_TXS0_PID, txs);
+ final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+ ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
+
+ if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
+ return false;
+
+ if (txs & MT_TXS0_QUEUE_TIMEOUT)
+ return false;
+
+ if (!ack_timeout)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
+
+ if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
+ info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
+
+ if (fixed_rate && !probe) {
+ info->status.rates[0].count = count;
+ goto out;
+ }
+
+ for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
+ int cur_count = min_t(int, count, 2 * MT7615_RATE_RETRY);
+
+ if (!i && probe) {
+ cur_count = 1;
+ } else {
+ info->status.rates[i] = sta->rates[idx];
+ idx++;
+ }
+
+ if (i && info->status.rates[i].idx < 0) {
+ info->status.rates[i - 1].count += count;
+ break;
+ }
+
+ if (!count) {
+ info->status.rates[i].idx = -1;
+ break;
+ }
+
+ info->status.rates[i].count = cur_count;
+ final_idx = i;
+ count -= cur_count;
+ }
+
+out:
+ final_rate_flags = info->status.rates[final_idx].flags;
+
+ switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ /* fall through */
+ case MT_PHY_TYPE_OFDM:
+ if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mt76.sband_5g.sband;
+ else
+ sband = &dev->mt76.sband_2g.sband;
+ final_rate &= MT_TX_RATE_IDX;
+ final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
+ final_rate_flags = 0;
+ break;
+ case MT_PHY_TYPE_HT_GF:
+ case MT_PHY_TYPE_HT:
+ final_rate_flags |= IEEE80211_TX_RC_MCS;
+ final_rate &= MT_TX_RATE_IDX;
+ if (final_rate > 31)
+ return false;
+ break;
+ case MT_PHY_TYPE_VHT:
+ final_nss = FIELD_GET(MT_TX_RATE_NSS, final_rate);
+ final_rate_flags |= IEEE80211_TX_RC_VHT_MCS;
+ final_rate = (final_rate & MT_TX_RATE_IDX) | (final_nss << 4);
+ break;
+ default:
+ return false;
+ }
+
+ info->status.rates[final_idx].idx = final_rate;
+ info->status.rates[final_idx].flags = final_rate_flags;
+
+ return true;
+}
+
+static bool mt7615_mac_add_txs_skb(struct mt7615_dev *dev,
+ struct mt7615_sta *sta, int pid,
+ __le32 *txs_data)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ if (pid < MT_PACKET_ID_FIRST)
+ return false;
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
+ if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
+ spin_lock_bh(&dev->mt76.lock);
+ if (sta->rate_probe) {
+ mt7615_mcu_set_rates(dev, sta, NULL,
+ sta->rates);
+ sta->rate_probe = false;
+ }
+ spin_unlock_bh(&dev->mt76.lock);
+ }
+
+ if (!mt7615_fill_txs(dev, sta, info, txs_data)) {
+ ieee80211_tx_info_clear_status(info);
+ info->status.rates[0].idx = -1;
+ }
+
+ mt76_tx_status_skb_done(mdev, skb, &list);
+ }
+ mt76_tx_status_unlock(mdev, &list);
+
+ return !!skb;
+}
+
+void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data)
+{
+ struct ieee80211_tx_info info = {};
+ struct ieee80211_sta *sta = NULL;
+ struct mt7615_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u32 txs;
+ u8 wcidx;
+ u8 pid;
+
+ txs = le32_to_cpu(txs_data[0]);
+ pid = FIELD_GET(MT_TXS0_PID, txs);
+ txs = le32_to_cpu(txs_data[2]);
+ wcidx = FIELD_GET(MT_TXS2_WCID, txs);
+
+ if (pid == MT_PACKET_ID_NO_ACK)
+ return;
+
+ if (wcidx >= ARRAY_SIZE(dev->mt76.wcid))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7615_sta, wcid);
+ sta = wcid_to_sta(wcid);
+
+ if (mt7615_mac_add_txs_skb(dev, msta, pid, txs_data))
+ goto out;
+
+ if (wcidx >= MT7615_WTBL_STA || !sta)
+ goto out;
+
+ if (mt7615_fill_txs(dev, msta, &info, txs_data))
+ ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info);
+
+out:
+ rcu_read_unlock();
+}
+
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
+{
+ struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ u8 i, count;
+
+ count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
+ for (i = 0; i < count; i++) {
+ spin_lock_bh(&dev->token_lock);
+ txwi = idr_remove(&dev->token, le16_to_cpu(free->token[i]));
+ spin_unlock_bh(&dev->token_lock);
+
+ if (!txwi)
+ continue;
+
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (txwi->skb) {
+ mt76_tx_complete_skb(mdev, txwi->skb);
+ txwi->skb = NULL;
+ }
+
+ mt76_put_txwi(mdev, txwi);
+ }
+ dev_kfree_skb(skb);
+}
+
+void mt7615_mac_work(struct work_struct *work)
+{
+ struct mt7615_dev *dev;
+
+ dev = (struct mt7615_dev *)container_of(work, struct mt76_dev,
+ mac_work.work);
+
+ mt76_tx_status_check(&dev->mt76, NULL, false);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
new file mode 100644
index 000000000000..18ad4b8a3807
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MAC_H
+#define __MT7615_MAC_H
+
+#define MT_CT_PARSE_LEN 72
+#define MT_CT_DMA_BUF_NUM 2
+
+#define MT_RXD0_LENGTH GENMASK(15, 0)
+#define MT_RXD0_PKT_TYPE GENMASK(31, 29)
+
+#define MT_RXD0_NORMAL_ETH_TYPE_OFS GENMASK(22, 16)
+#define MT_RXD0_NORMAL_IP_SUM BIT(23)
+#define MT_RXD0_NORMAL_UDP_TCP_SUM BIT(24)
+#define MT_RXD0_NORMAL_GROUP_1 BIT(25)
+#define MT_RXD0_NORMAL_GROUP_2 BIT(26)
+#define MT_RXD0_NORMAL_GROUP_3 BIT(27)
+#define MT_RXD0_NORMAL_GROUP_4 BIT(28)
+
+enum rx_pkt_type {
+ PKT_TYPE_TXS,
+ PKT_TYPE_TXRXV,
+ PKT_TYPE_NORMAL,
+ PKT_TYPE_RX_DUP_RFB,
+ PKT_TYPE_RX_TMR,
+ PKT_TYPE_RETRIEVE,
+ PKT_TYPE_TXRX_NOTIFY,
+ PKT_TYPE_RX_EVENT
+};
+
+#define MT_RXD1_NORMAL_BSSID GENMASK(31, 26)
+#define MT_RXD1_NORMAL_PAYLOAD_FORMAT GENMASK(25, 24)
+#define MT_RXD1_NORMAL_HDR_TRANS BIT(23)
+#define MT_RXD1_NORMAL_HDR_OFFSET BIT(22)
+#define MT_RXD1_NORMAL_MAC_HDR_LEN GENMASK(21, 16)
+#define MT_RXD1_NORMAL_CH_FREQ GENMASK(15, 8)
+#define MT_RXD1_NORMAL_KEY_ID GENMASK(7, 6)
+#define MT_RXD1_NORMAL_BEACON_UC BIT(5)
+#define MT_RXD1_NORMAL_BEACON_MC BIT(4)
+#define MT_RXD1_NORMAL_BF_REPORT BIT(3)
+#define MT_RXD1_NORMAL_ADDR_TYPE GENMASK(2, 1)
+#define MT_RXD1_NORMAL_BCAST GENMASK(2, 1)
+#define MT_RXD1_NORMAL_MCAST BIT(2)
+#define MT_RXD1_NORMAL_U2M BIT(1)
+#define MT_RXD1_NORMAL_HTC_VLD BIT(0)
+
+#define MT_RXD2_NORMAL_NON_AMPDU BIT(31)
+#define MT_RXD2_NORMAL_NON_AMPDU_SUB BIT(30)
+#define MT_RXD2_NORMAL_NDATA BIT(29)
+#define MT_RXD2_NORMAL_NULL_FRAME BIT(28)
+#define MT_RXD2_NORMAL_FRAG BIT(27)
+#define MT_RXD2_NORMAL_INT_FRAME BIT(26)
+#define MT_RXD2_NORMAL_HDR_TRANS_ERROR BIT(25)
+#define MT_RXD2_NORMAL_MAX_LEN_ERROR BIT(24)
+#define MT_RXD2_NORMAL_AMSDU_ERR BIT(23)
+#define MT_RXD2_NORMAL_LEN_MISMATCH BIT(22)
+#define MT_RXD2_NORMAL_TKIP_MIC_ERR BIT(21)
+#define MT_RXD2_NORMAL_ICV_ERR BIT(20)
+#define MT_RXD2_NORMAL_CLM BIT(19)
+#define MT_RXD2_NORMAL_CM BIT(18)
+#define MT_RXD2_NORMAL_FCS_ERR BIT(17)
+#define MT_RXD2_NORMAL_SW_BIT BIT(16)
+#define MT_RXD2_NORMAL_SEC_MODE GENMASK(15, 12)
+#define MT_RXD2_NORMAL_TID GENMASK(11, 8)
+#define MT_RXD2_NORMAL_WLAN_IDX GENMASK(7, 0)
+
+#define MT_RXD3_NORMAL_PF_STS GENMASK(31, 30)
+#define MT_RXD3_NORMAL_PF_MODE BIT(29)
+#define MT_RXD3_NORMAL_CLS_BITMAP GENMASK(28, 19)
+#define MT_RXD3_NORMAL_WOL GENMASK(18, 14)
+#define MT_RXD3_NORMAL_MAGIC_PKT BIT(13)
+#define MT_RXD3_NORMAL_OFLD GENMASK(12, 11)
+#define MT_RXD3_NORMAL_CLS BIT(10)
+#define MT_RXD3_NORMAL_PATTERN_DROP BIT(9)
+#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS BIT(8)
+#define MT_RXD3_NORMAL_RXV_SEQ GENMASK(7, 0)
+
+#define MT_RXV1_ACID_DET_H BIT(31)
+#define MT_RXV1_ACID_DET_L BIT(30)
+#define MT_RXV1_VHTA2_B8_B3 GENMASK(29, 24)
+#define MT_RXV1_NUM_RX GENMASK(23, 22)
+#define MT_RXV1_HT_NO_SOUND BIT(21)
+#define MT_RXV1_HT_SMOOTH BIT(20)
+#define MT_RXV1_HT_SHORT_GI BIT(19)
+#define MT_RXV1_HT_AGGR BIT(18)
+#define MT_RXV1_VHTA1_B22 BIT(17)
+#define MT_RXV1_FRAME_MODE GENMASK(16, 15)
+#define MT_RXV1_TX_MODE GENMASK(14, 12)
+#define MT_RXV1_HT_EXT_LTF GENMASK(11, 10)
+#define MT_RXV1_HT_AD_CODE BIT(9)
+#define MT_RXV1_HT_STBC GENMASK(8, 7)
+#define MT_RXV1_TX_RATE GENMASK(6, 0)
+
+#define MT_RXV2_SEL_ANT BIT(31)
+#define MT_RXV2_VALID_BIT BIT(30)
+#define MT_RXV2_NSTS GENMASK(29, 27)
+#define MT_RXV2_GROUP_ID GENMASK(26, 21)
+#define MT_RXV2_LENGTH GENMASK(20, 0)
+
+enum tx_header_format {
+ MT_HDR_FORMAT_802_3,
+ MT_HDR_FORMAT_CMD,
+ MT_HDR_FORMAT_802_11,
+ MT_HDR_FORMAT_802_11_EXT,
+};
+
+enum tx_pkt_type {
+ MT_TX_TYPE_CT,
+ MT_TX_TYPE_SF,
+ MT_TX_TYPE_CMD,
+ MT_TX_TYPE_FW,
+};
+
+enum tx_pkt_queue_idx {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+};
+
+enum tx_port_idx {
+ MT_TX_PORT_IDX_LMAC,
+ MT_TX_PORT_IDX_MCU
+};
+
+enum tx_mcu_port_q_idx {
+ MT_TX_MCU_PORT_RX_Q0 = 0,
+ MT_TX_MCU_PORT_RX_Q1,
+ MT_TX_MCU_PORT_RX_Q2,
+ MT_TX_MCU_PORT_RX_Q3,
+ MT_TX_MCU_PORT_RX_FWDL = 0x1e
+};
+
+enum tx_phy_bandwidth {
+ MT_PHY_BW_20,
+ MT_PHY_BW_40,
+ MT_PHY_BW_80,
+ MT_PHY_BW_160,
+};
+
+#define MT_CT_INFO_APPLY_TXD BIT(0)
+#define MT_CT_INFO_COPY_HOST_TXD_ALL BIT(1)
+#define MT_CT_INFO_MGMT_FRAME BIT(2)
+#define MT_CT_INFO_NONE_CIPHER_FRAME BIT(3)
+#define MT_CT_INFO_HSR2_TX BIT(4)
+
+#define MT_TXD_SIZE (8 * 4)
+
+#define MT_TXD0_P_IDX BIT(31)
+#define MT_TXD0_Q_IDX GENMASK(30, 26)
+#define MT_TXD0_UDP_TCP_SUM BIT(24)
+#define MT_TXD0_IP_SUM BIT(23)
+#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
+#define MT_TXD0_TX_BYTES GENMASK(15, 0)
+
+#define MT_TXD1_OWN_MAC GENMASK(31, 26)
+#define MT_TXD1_PKT_FMT GENMASK(25, 24)
+#define MT_TXD1_TID GENMASK(23, 21)
+#define MT_TXD1_AMSDU BIT(20)
+#define MT_TXD1_UNXV BIT(19)
+#define MT_TXD1_HDR_PAD GENMASK(18, 17)
+#define MT_TXD1_TXD_LEN BIT(16)
+#define MT_TXD1_LONG_FORMAT BIT(15)
+#define MT_TXD1_HDR_FORMAT GENMASK(14, 13)
+#define MT_TXD1_HDR_INFO GENMASK(12, 8)
+#define MT_TXD1_WLAN_IDX GENMASK(7, 0)
+
+#define MT_TXD2_FIX_RATE BIT(31)
+#define MT_TXD2_TIMING_MEASURE BIT(30)
+#define MT_TXD2_BA_DISABLE BIT(29)
+#define MT_TXD2_POWER_OFFSET GENMASK(28, 24)
+#define MT_TXD2_MAX_TX_TIME GENMASK(23, 16)
+#define MT_TXD2_FRAG GENMASK(15, 14)
+#define MT_TXD2_HTC_VLD BIT(13)
+#define MT_TXD2_DURATION BIT(12)
+#define MT_TXD2_BIP BIT(11)
+#define MT_TXD2_MULTICAST BIT(10)
+#define MT_TXD2_RTS BIT(9)
+#define MT_TXD2_SOUNDING BIT(8)
+#define MT_TXD2_NDPA BIT(7)
+#define MT_TXD2_NDP BIT(6)
+#define MT_TXD2_FRAME_TYPE GENMASK(5, 4)
+#define MT_TXD2_SUB_TYPE GENMASK(3, 0)
+
+#define MT_TXD3_SN_VALID BIT(31)
+#define MT_TXD3_PN_VALID BIT(30)
+#define MT_TXD3_SEQ GENMASK(27, 16)
+#define MT_TXD3_REM_TX_COUNT GENMASK(15, 11)
+#define MT_TXD3_TX_COUNT GENMASK(10, 6)
+#define MT_TXD3_PROTECT_FRAME BIT(1)
+#define MT_TXD3_NO_ACK BIT(0)
+
+#define MT_TXD4_PN_LOW GENMASK(31, 0)
+
+#define MT_TXD5_PN_HIGH GENMASK(31, 16)
+#define MT_TXD5_SW_POWER_MGMT BIT(13)
+#define MT_TXD5_DA_SELECT BIT(11)
+#define MT_TXD5_TX_STATUS_HOST BIT(10)
+#define MT_TXD5_TX_STATUS_MCU BIT(9)
+#define MT_TXD5_TX_STATUS_FMT BIT(8)
+#define MT_TXD5_PID GENMASK(7, 0)
+
+#define MT_TXD6_FIXED_RATE BIT(31)
+#define MT_TXD6_SGI BIT(30)
+#define MT_TXD6_LDPC BIT(29)
+#define MT_TXD6_TX_BF BIT(28)
+#define MT_TXD6_TX_RATE GENMASK(27, 16)
+#define MT_TXD6_ANT_ID GENMASK(15, 4)
+#define MT_TXD6_DYN_BW BIT(3)
+#define MT_TXD6_FIXED_BW BIT(2)
+#define MT_TXD6_BW GENMASK(1, 0)
+
+#define MT_TXD7_TYPE GENMASK(21, 20)
+#define MT_TXD7_SUB_TYPE GENMASK(19, 16)
+
+#define MT_TX_RATE_STBC BIT(11)
+#define MT_TX_RATE_NSS GENMASK(10, 9)
+#define MT_TX_RATE_MODE GENMASK(8, 6)
+#define MT_TX_RATE_IDX GENMASK(5, 0)
+
+#define MT_TXP_MAX_BUF_NUM 6
+
+struct mt7615_txp {
+ __le16 flags;
+ __le16 token;
+ u8 bss_idx;
+ u8 rept_wds_wcid;
+ u8 rsv;
+ u8 nbuf;
+ __le32 buf[MT_TXP_MAX_BUF_NUM];
+ __le16 len[MT_TXP_MAX_BUF_NUM];
+} __packed;
+
+struct mt7615_tx_free {
+ __le16 rx_byte_cnt;
+ __le16 ctrl;
+ u8 txd_cnt;
+ u8 rsv[3];
+ __le16 token[];
+} __packed;
+
+#define MT_TX_FREE_MSDU_ID_CNT GENMASK(6, 0)
+
+#define MT_TXS0_PID GENMASK(31, 24)
+#define MT_TXS0_BA_ERROR BIT(22)
+#define MT_TXS0_PS_FLAG BIT(21)
+#define MT_TXS0_TXOP_TIMEOUT BIT(20)
+#define MT_TXS0_BIP_ERROR BIT(19)
+
+#define MT_TXS0_QUEUE_TIMEOUT BIT(18)
+#define MT_TXS0_RTS_TIMEOUT BIT(17)
+#define MT_TXS0_ACK_TIMEOUT BIT(16)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+
+#define MT_TXS0_TX_STATUS_HOST BIT(15)
+#define MT_TXS0_TX_STATUS_MCU BIT(14)
+#define MT_TXS0_TXS_FORMAT BIT(13)
+#define MT_TXS0_FIXED_RATE BIT(12)
+#define MT_TXS0_TX_RATE GENMASK(11, 0)
+
+#define MT_TXS1_ANT_ID GENMASK(31, 20)
+#define MT_TXS1_RESP_RATE GENMASK(19, 16)
+#define MT_TXS1_BW GENMASK(15, 14)
+#define MT_TXS1_I_TXBF BIT(13)
+#define MT_TXS1_E_TXBF BIT(12)
+#define MT_TXS1_TID GENMASK(11, 9)
+#define MT_TXS1_AMPDU BIT(8)
+#define MT_TXS1_ACKED_MPDU BIT(7)
+#define MT_TXS1_TX_POWER_DBM GENMASK(6, 0)
+
+#define MT_TXS2_WCID GENMASK(31, 24)
+#define MT_TXS2_RXV_SEQNO GENMASK(23, 16)
+#define MT_TXS2_TX_DELAY GENMASK(15, 0)
+
+#define MT_TXS3_LAST_TX_RATE GENMASK(31, 29)
+#define MT_TXS3_TX_COUNT GENMASK(28, 24)
+#define MT_TXS3_F1_TSSI1 GENMASK(23, 12)
+#define MT_TXS3_F1_TSSI0 GENMASK(11, 0)
+#define MT_TXS3_F0_SEQNO GENMASK(11, 0)
+
+#define MT_TXS4_F0_TIMESTAMP GENMASK(31, 0)
+#define MT_TXS4_F1_TSSI3 GENMASK(23, 12)
+#define MT_TXS4_F1_TSSI2 GENMASK(11, 0)
+
+#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS5_F1_NOISE_2 GENMASK(23, 16)
+#define MT_TXS5_F1_NOISE_1 GENMASK(15, 8)
+#define MT_TXS5_F1_NOISE_0 GENMASK(7, 0)
+
+#define MT_TXS6_F1_RCPI_3 GENMASK(31, 24)
+#define MT_TXS6_F1_RCPI_2 GENMASK(23, 16)
+#define MT_TXS6_F1_RCPI_1 GENMASK(15, 8)
+#define MT_TXS6_F1_RCPI_0 GENMASK(7, 0)
+
+#endif
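
The MT_TX_RATE_* masks above describe the packed rate word built by mt7615_mac_tx_rate_val() and decoded again from the TX-status words in mt7615_fill_txs(). A small round-trip sketch using only those masks and the FIELD_PREP()/FIELD_GET() helpers from linux/bitfield.h (values are placeholders):

    u8 rate_idx = 7, phy = 1, nss = 2;   /* placeholder values */
    u16 rateval;

    rateval = FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
              FIELD_PREP(MT_TX_RATE_MODE, phy) |
              FIELD_PREP(MT_TX_RATE_NSS, nss - 1);

    /* the TX-status path recovers the same fields: */
    WARN_ON(FIELD_GET(MT_TX_RATE_IDX, rateval) != rate_idx);
    WARN_ON(FIELD_GET(MT_TX_RATE_MODE, rateval) != phy);
    WARN_ON(FIELD_GET(MT_TX_RATE_NSS, rateval) + 1 != nss);
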
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
new file mode 100644
index 000000000000..80e6b211f60b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7615.h"
+
+static int mt7615_start(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+
+ return 0;
+}
+
+static void mt7615_stop(struct ieee80211_hw *hw)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u32 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ /* ap use hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ for (i = EXT_BSSID_1; i < EXT_BSSID_END; i++)
+ if (~mask & BIT(i))
+ return i;
+
+ break;
+ case NL80211_IFTYPE_STATION:
+ /* sta use hw bssid other than 0 */
+ for (i = HW_BSSID_1; i < HW_BSSID_MAX; i++)
+ if (~mask & BIT(i))
+ return i;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+	}
+
+ return -1;
+}
+
+static int mt7615_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = hw->priv;
+ struct mt76_txq *mtxq;
+ int idx, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mvif->idx = ffs(~dev->vif_mask) - 1;
+ if (mvif->idx >= MT7615_MAX_INTERFACES) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
+ if (mvif->omac_idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* TODO: DBDC support. Use band 0 and wmm 0 for now */
+ mvif->band_idx = 0;
+ mvif->wmm_idx = 0;
+
+ ret = mt7615_mcu_set_dev_info(dev, vif, 1);
+ if (ret)
+ goto out;
+
+ dev->vif_mask |= BIT(mvif->idx);
+ dev->omac_mask |= BIT(mvif->omac_idx);
+ idx = MT7615_WTBL_RESERVED - 1 - mvif->idx;
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.hw_key_idx = -1;
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = &mvif->sta.wcid;
+ mt76_txq_init(&dev->mt76, vif->txq);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7615_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_dev *dev = hw->priv;
+ int idx = mvif->sta.wcid.idx;
+
+ /* TODO: disable beacon for the bss */
+
+ mt7615_mcu_set_dev_info(dev, vif, 0);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ mt76_txq_remove(&dev->mt76, vif->txq);
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->vif_mask &= ~BIT(mvif->idx);
+ dev->omac_mask &= ~BIT(mvif->omac_idx);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int mt7615_set_channel(struct mt7615_dev *dev,
+ struct cfg80211_chan_def *def)
+{
+ int ret;
+
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ set_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_set_channel(&dev->mt76);
+
+ ret = mt7615_mcu_set_channel(dev);
+ if (ret)
+ return ret;
+
+ clear_bit(MT76_RESET, &dev->mt76.state);
+
+ mt76_txq_schedule_all(&dev->mt76);
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
+ MT7615_WATCHDOG_TIME);
+ return 0;
+}
+
+static int mt7615_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7615_dev *dev = hw->priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ int idx = key->keyidx;
+
+	/* The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ if (cmd == SET_KEY) {
+ key->hw_key_idx = wcid->idx;
+ wcid->hw_key_idx = idx;
+ } else {
+ if (idx == wcid->hw_key_idx)
+ wcid->hw_key_idx = -1;
+
+ key = NULL;
+ }
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+
+ return mt7615_mcu_set_wtbl_key(dev, wcid->idx, key, cmd);
+}
+
+static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7615_dev *dev = hw->priv;
+ int ret = 0;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ mutex_lock(&dev->mt76.mutex);
+
+ ieee80211_stop_queues(hw);
+ ret = mt7615_set_channel(dev, &hw->conf.chandef);
+ ieee80211_wake_queues(hw);
+
+ mutex_unlock(&dev->mt76.mutex);
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+
+ mutex_unlock(&dev->mt76.mutex);
+ }
+ return ret;
+}
+
+static int
+mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7615_dev *dev = hw->priv;
+ static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+ };
+
+ /* TODO: hw wmm_set 1~3 */
+ return mt7615_mcu_set_wmm(dev, wmm_queue_map[queue], params);
+}
+
+static void mt7615_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7615_dev *dev = hw->priv;
+ u32 flags = 0;
+
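+	/* set the hardware drop bits for every filter flag mac80211 did not request */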
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ dev->mt76.rxfilter &= ~(_hw); \
+ dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
+}
+
+static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ /* TODO: sta mode connect/disconnect
+ * BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID
+ */
+
+ /* TODO: update beacon content
+ * BSS_CHANGED_BEACON
+ */
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (info->enable_beacon) {
+ mt7615_mcu_set_bss_info(dev, vif, 1);
+ mt7615_mcu_add_wtbl_bmc(dev, vif);
+ mt7615_mcu_set_sta_rec_bmc(dev, vif, 1);
+ mt7615_mcu_set_bcn(dev, vif, 1);
+ } else {
+ mt7615_mcu_set_sta_rec_bmc(dev, vif, 0);
+ mt7615_mcu_del_wtbl_bmc(dev, vif);
+ mt7615_mcu_set_bss_info(dev, vif, 0);
+ mt7615_mcu_set_bcn(dev, vif, 0);
+ }
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ int idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
+
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+
+ mt7615_mcu_add_wtbl(dev, vif, sta);
+ mt7615_mcu_set_sta_rec(dev, vif, sta, 1);
+
+ return 0;
+}
+
+void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ if (sta->ht_cap.ht_supported)
+ mt7615_mcu_set_ht_cap(dev, vif, sta);
+}
+
+void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
+ mt7615_mcu_del_wtbl(dev, vif, sta);
+}
+
+static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_dev *dev = hw->priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
+ int i;
+
+ spin_lock_bh(&dev->mt76.lock);
+ for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
+ msta->rates[i].idx = sta_rates->rate[i].idx;
+ msta->rates[i].count = sta_rates->rate[i].count;
+ msta->rates[i].flags = sta_rates->rate[i].flags;
+
+ if (msta->rates[i].idx < 0 || !msta->rates[i].count)
+ break;
+ }
+ msta->n_rates = i;
+ mt7615_mcu_set_rates(dev, msta, NULL, msta->rates);
+ msta->rate_probe = false;
+ spin_unlock_bh(&dev->mt76.lock);
+}
+
+static void mt7615_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7615_dev *dev = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
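+	/* default to the global wcid, then narrow to the per-sta or per-vif entry */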
+ if (control->sta) {
+ struct mt7615_sta *sta;
+
+ sta = (struct mt7615_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7615_vif *mvif;
+
+ mvif = (struct mt7615_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(&dev->mt76, control->sta, wcid, skb);
+}
+
+static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7615_mcu_set_rts_thresh(dev, val);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7615_dev *dev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 *ssn = &params->ssn;
+ struct mt76_txq *mtxq;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
+ params->buf_size);
+ mt7615_mcu_set_rx_ba(dev, params, 1);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt7615_mcu_set_rx_ba(dev, params, 0);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ mt7615_mcu_set_tx_ba(dev, params, 1);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
+ mt7615_mcu_set_tx_ba(dev, params, 0);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ mt7615_mcu_set_tx_ba(dev, params, 0);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+
+ return 0;
+}
+
+static void
+mt7615_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const u8 *mac)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ set_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+static void
+mt7615_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7615_dev *dev = hw->priv;
+
+ clear_bit(MT76_SCANNING, &dev->mt76.state);
+}
+
+const struct ieee80211_ops mt7615_ops = {
+ .tx = mt7615_tx,
+ .start = mt7615_start,
+ .stop = mt7615_stop,
+ .add_interface = mt7615_add_interface,
+ .remove_interface = mt7615_remove_interface,
+ .config = mt7615_config,
+ .conf_tx = mt7615_conf_tx,
+ .configure_filter = mt7615_configure_filter,
+ .bss_info_changed = mt7615_bss_info_changed,
+ .sta_state = mt76_sta_state,
+ .set_key = mt7615_set_key,
+ .ampdu_action = mt7615_ampdu_action,
+ .set_rts_threshold = mt7615_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sta_rate_tbl_update = mt7615_sta_rate_tbl_update,
+ .sw_scan_start = mt7615_sw_scan,
+ .sw_scan_complete = mt7615_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
new file mode 100644
index 000000000000..ea67c6022fe6
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -0,0 +1,1655 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Roy Luo <royluo@google.com>
+ * Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include "mt7615.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
+struct mt7615_patch_hdr {
+ char build_date[16];
+ char platform[4];
+ __be32 hw_sw_ver;
+ __be32 patch_ver;
+ __be16 checksum;
+} __packed;
+
+struct mt7615_fw_trailer {
+ __le32 addr;
+ u8 chip_id;
+ u8 feature_set;
+ u8 eco_code;
+ char fw_ver[10];
+ char build_date[15];
+ __le32 len;
+} __packed;
+
+#define MCU_PATCH_ADDRESS 0x80000
+
+#define N9_REGION_NUM 2
+#define CR4_REGION_NUM 1
+
+#define IMG_CRC_LEN 4
+
+#define FW_FEATURE_SET_ENCRYPT BIT(0)
+#define FW_FEATURE_SET_KEY_IDX GENMASK(2, 1)
+
+#define DL_MODE_ENCRYPT BIT(0)
+#define DL_MODE_KEY_IDX GENMASK(2, 1)
+#define DL_MODE_RESET_SEC_IV BIT(3)
+#define DL_MODE_WORKING_PDA_CR4 BIT(4)
+#define DL_MODE_NEED_RSP BIT(31)
+
+#define FW_START_OVERRIDE BIT(0)
+#define FW_START_WORKING_PDA_CR4 BIT(2)
+
+static int __mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int query, int dest, int *wait_seq)
+{
+ struct mt7615_mcu_txd *mcu_txd;
+ u8 seq, q_idx, pkt_fmt;
+ enum mt76_txq_id qid;
+ u32 val;
+ __le32 *txd;
+
+ if (!skb)
+ return -EINVAL;
+
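+	/* advance the 4-bit message sequence counter, skipping the value 0 */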
+ seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+ if (!seq)
+ seq = ++dev->mt76.mmio.mcu.msg_seq & 0xf;
+
+ mcu_txd = (struct mt7615_mcu_txd *)skb_push(skb,
+ sizeof(struct mt7615_mcu_txd));
+ memset(mcu_txd, 0, sizeof(struct mt7615_mcu_txd));
+
+ if (cmd != -MCU_CMD_FW_SCATTER) {
+ q_idx = MT_TX_MCU_PORT_RX_Q0;
+ pkt_fmt = MT_TX_TYPE_CMD;
+ } else {
+ q_idx = MT_TX_MCU_PORT_RX_FWDL;
+ pkt_fmt = MT_TX_TYPE_FW;
+ }
+
+ txd = mcu_txd->txd;
+
+	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len) |
+ FIELD_PREP(MT_TXD0_P_IDX, MT_TX_PORT_IDX_MCU) |
+ FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
+ txd[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_CMD) |
+ FIELD_PREP(MT_TXD1_PKT_FMT, pkt_fmt);
+ txd[1] = cpu_to_le32(val);
+
+ mcu_txd->len = cpu_to_le16(skb->len - sizeof(mcu_txd->txd));
+ mcu_txd->pq_id = cpu_to_le16(MCU_PQ_ID(MT_TX_PORT_IDX_MCU, q_idx));
+ mcu_txd->pkt_type = MCU_PKT_ID;
+ mcu_txd->seq = seq;
+
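+	/* negative values are top-level command IDs; positive values are extended command IDs */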
+ if (cmd < 0) {
+ mcu_txd->cid = -cmd;
+ } else {
+ mcu_txd->cid = MCU_CMD_EXT_CID;
+ mcu_txd->ext_cid = cmd;
+ if (query != MCU_Q_NA)
+ mcu_txd->ext_cid_ack = 1;
+ }
+
+ mcu_txd->set_query = query;
+ mcu_txd->s2d_index = dest;
+
+ if (wait_seq)
+ *wait_seq = seq;
+
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state))
+ qid = MT_TXQ_MCU;
+ else
+ qid = MT_TXQ_FWDL;
+
+ return mt76_tx_queue_skb_raw(dev, qid, skb, 0);
+}
+
+static int mt7615_mcu_msg_send(struct mt7615_dev *dev, struct sk_buff *skb,
+ int cmd, int query, int dest,
+ struct sk_buff **skb_ret)
+{
+ unsigned long expires = jiffies + 10 * HZ;
+ struct mt7615_mcu_rxd *rxd;
+ int ret, seq;
+
+ mutex_lock(&dev->mt76.mmio.mcu.mutex);
+
+ ret = __mt7615_mcu_msg_send(dev, skb, cmd, query, dest, &seq);
+ if (ret)
+ goto out;
+
+ while (1) {
+ skb = mt76_mcu_get_response(&dev->mt76, expires);
+ if (!skb) {
+ dev_err(dev->mt76.dev, "Message %d (seq %d) timeout\n",
+ cmd, seq);
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ rxd = (struct mt7615_mcu_rxd *)skb->data;
+		if (seq != rxd->seq) {
+			dev_kfree_skb(skb);
+			continue;
+		}
+
+ if (skb_ret) {
+ int hdr_len = sizeof(*rxd);
+
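+			/* responses received before the MCU firmware is running
+			 * carry a rxd header that is 4 bytes shorter
+			 */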
+ if (!test_bit(MT76_STATE_MCU_RUNNING,
+ &dev->mt76.state))
+ hdr_len -= 4;
+ skb_pull(skb, hdr_len);
+ *skb_ret = skb;
+ } else {
+ dev_kfree_skb(skb);
+ }
+
+ break;
+ }
+
+out:
+ mutex_unlock(&dev->mt76.mmio.mcu.mutex);
+
+ return ret;
+}
+
+static int mt7615_mcu_init_download(struct mt7615_dev *dev, u32 addr,
+ u32 len, u32 mode)
+{
+ struct {
+ __le32 addr;
+ __le32 len;
+ __le32 mode;
+ } req = {
+ .addr = cpu_to_le32(addr),
+ .len = cpu_to_le32(len),
+ .mode = cpu_to_le32(mode),
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
+ int len)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
+ while (len > 0) {
+ int cur_len = min_t(int, 4096 - sizeof(struct mt7615_mcu_txd),
+ len);
+
+ skb = mt7615_mcu_msg_alloc(data, cur_len);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = __mt7615_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+ if (ret)
+ break;
+
+ data += cur_len;
+ len -= cur_len;
+ }
+
+ return ret;
+}
+
+static int mt7615_mcu_start_firmware(struct mt7615_dev *dev, u32 addr,
+ u32 option)
+{
+ struct {
+ __le32 option;
+ __le32 addr;
+ } req = {
+ .option = cpu_to_le32(option),
+ .addr = cpu_to_le32(addr),
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_mcu_restart(struct mt7615_dev *dev)
+{
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(NULL, 0);
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_mcu_patch_sem_ctrl(struct mt7615_dev *dev, bool get)
+{
+ struct {
+ __le32 operation;
+ } req = {
+ .operation = cpu_to_le32(get ? PATCH_SEM_GET :
+ PATCH_SEM_RELEASE),
+ };
+ struct event {
+ u8 status;
+ u8 reserved[3];
+ } *resp;
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ struct sk_buff *skb_ret;
+ int ret;
+
+ ret = mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_SEM_CONTROL,
+ MCU_Q_NA, MCU_S2D_H2N, &skb_ret);
+ if (ret)
+ goto out;
+
+ resp = (struct event *)(skb_ret->data);
+ ret = resp->status;
+ dev_kfree_skb(skb_ret);
+
+out:
+ return ret;
+}
+
+static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
+{
+ struct {
+ u8 check_crc;
+ u8 reserved[3];
+ } req = {
+ .check_crc = 0,
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, -MCU_CMD_PATCH_FINISH_REQ,
+ MCU_Q_NA, MCU_S2D_H2N, NULL);
+}
+
+static int mt7615_driver_own(struct mt7615_dev *dev)
+{
+ mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_CFG_LPCR_HOST,
+ MT_CFG_LPCR_HOST_FW_OWN, 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int mt7615_load_patch(struct mt7615_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt7615_patch_hdr *hdr;
+ const char *firmware = MT7615_ROM_PATCH;
+ int len, ret, sem;
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 1);
+ switch (sem) {
+ case PATCH_IS_DL:
+ return 0;
+ case PATCH_NOT_DL_SEM_SUCCESS:
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Failed to get patch semaphore\n");
+ return -EAGAIN;
+ }
+
+ ret = request_firmware(&fw, firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_patch_hdr *)(fw->data);
+
+ dev_info(dev->mt76.dev, "HW/SW Version: 0x%x, Build Time: %.16s\n",
+ be32_to_cpu(hdr->hw_sw_ver), hdr->build_date);
+
+ len = fw->size - sizeof(*hdr);
+
+ ret = mt7615_mcu_init_download(dev, MCU_PATCH_ADDRESS, len,
+ DL_MODE_NEED_RSP);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + sizeof(*hdr), len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_start_patch(dev);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start patch\n");
+
+out:
+ release_firmware(fw);
+
+ sem = mt7615_mcu_patch_sem_ctrl(dev, 0);
+ switch (sem) {
+ case PATCH_REL_SEM_SUCCESS:
+ break;
+ default:
+ ret = -EAGAIN;
+ dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
+ break;
+ }
+
+ return ret;
+}
+
+static u32 gen_dl_mode(u8 feature_set, bool is_cr4)
+{
+ u32 ret = 0;
+
+ ret |= (feature_set & FW_FEATURE_SET_ENCRYPT) ?
+ (DL_MODE_ENCRYPT | DL_MODE_RESET_SEC_IV) : 0;
+ ret |= FIELD_PREP(DL_MODE_KEY_IDX,
+ FIELD_GET(FW_FEATURE_SET_KEY_IDX, feature_set));
+ ret |= DL_MODE_NEED_RSP;
+ ret |= is_cr4 ? DL_MODE_WORKING_PDA_CR4 : 0;
+
+ return ret;
+}
+
+static int mt7615_load_ram(struct mt7615_dev *dev)
+{
+ const struct firmware *fw;
+ const struct mt7615_fw_trailer *hdr;
+ const char *n9_firmware = MT7615_FIRMWARE_N9;
+ const char *cr4_firmware = MT7615_FIRMWARE_CR4;
+ u32 n9_ilm_addr, offset;
+ int i, ret;
+
+ ret = request_firmware(&fw, n9_firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < N9_REGION_NUM * sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+ N9_REGION_NUM * sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "N9 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ n9_ilm_addr = le32_to_cpu(hdr->addr);
+
+ for (offset = 0, i = 0; i < N9_REGION_NUM; i++) {
+ u32 len, addr, mode;
+
+ len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
+ addr = le32_to_cpu(hdr[i].addr);
+ mode = gen_dl_mode(hdr[i].feature_set, false);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ offset += len;
+ }
+
+ ret = mt7615_mcu_start_firmware(dev, n9_ilm_addr, FW_START_OVERRIDE);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to start N9 firmware\n");
+ goto out;
+ }
+
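+	/* the N9 firmware has been started; load the CR4 firmware image next */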
+ release_firmware(fw);
+
+ ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data || fw->size < CR4_REGION_NUM * sizeof(*hdr)) {
+ dev_err(dev->mt76.dev, "Invalid firmware\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ hdr = (const struct mt7615_fw_trailer *)(fw->data + fw->size -
+ CR4_REGION_NUM * sizeof(*hdr));
+
+ dev_info(dev->mt76.dev, "CR4 Firmware Version: %.10s, Build Time: %.15s\n",
+ hdr->fw_ver, hdr->build_date);
+
+ for (offset = 0, i = 0; i < CR4_REGION_NUM; i++) {
+ u32 len, addr, mode;
+
+ len = le32_to_cpu(hdr[i].len) + IMG_CRC_LEN;
+ addr = le32_to_cpu(hdr[i].addr);
+ mode = gen_dl_mode(hdr[i].feature_set, true);
+
+ ret = mt7615_mcu_init_download(dev, addr, len, mode);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Download request failed\n");
+ goto out;
+ }
+
+ ret = mt7615_mcu_send_firmware(dev, fw->data + offset, len);
+ if (ret) {
+ dev_err(dev->mt76.dev, "Failed to send firmware to device\n");
+ goto out;
+ }
+
+ offset += len;
+ }
+
+ ret = mt7615_mcu_start_firmware(dev, 0, FW_START_WORKING_PDA_CR4);
+ if (ret)
+ dev_err(dev->mt76.dev, "Failed to start CR4 firmware\n");
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7615_load_firmware(struct mt7615_dev *dev)
+{
+ int ret;
+ u32 val;
+
+ val = mt76_get_field(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE);
+
+ if (val != FW_STATE_FW_DOWNLOAD) {
+ dev_err(dev->mt76.dev, "Firmware is not ready for download\n");
+ return -EIO;
+ }
+
+ ret = mt7615_load_patch(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_ram(dev);
+ if (ret)
+ return ret;
+
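+	/* wait for the firmware state machine to report CR4 ready */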
+ if (!mt76_poll_msec(dev, MT_TOP_MISC2, MT_TOP_MISC2_FW_STATE,
+ FIELD_PREP(MT_TOP_MISC2_FW_STATE,
+ FW_STATE_CR4_RDY), 500)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7615_mcu_init(struct mt7615_dev *dev)
+{
+ int ret;
+
+ ret = mt7615_driver_own(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7615_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
+
+ return 0;
+}
+
+void mt7615_mcu_exit(struct mt7615_dev *dev)
+{
+ mt7615_mcu_restart(dev);
+ mt76_wr(dev, MT_CFG_LPCR_HOST, MT_CFG_LPCR_HOST_FW_OWN);
+ skb_queue_purge(&dev->mt76.mmio.mcu.res_q);
+}
+
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev)
+{
+ struct req_data {
+ u8 val;
+ } __packed;
+	struct {
+		u8 buffer_mode;
+		u8 pad;
+		__le16 len;
+	} __packed req_hdr = {
+		.buffer_mode = 1,
+		.len = cpu_to_le16(__MT_EE_MAX - MT_EE_NIC_CONF_0),
+	};
+ struct sk_buff *skb;
+ struct req_data *data;
+ const int size = (__MT_EE_MAX - MT_EE_NIC_CONF_0) *
+ sizeof(struct req_data);
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ u16 off;
+
+ skb = mt7615_mcu_msg_alloc(NULL, size + sizeof(req_hdr));
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+ data = (struct req_data *)skb_put(skb, size);
+ memset(data, 0, size);
+
+ for (off = MT_EE_NIC_CONF_0; off < __MT_EE_MAX; off++)
+ data[off - MT_EE_NIC_CONF_0].val = eep[off];
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_init_mac(struct mt7615_dev *dev)
+{
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req = {
+ .enable = 1,
+ .band = 0,
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_MAC_INIT_CTRL,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val)
+{
+ struct {
+ u8 prot_idx;
+ u8 band;
+ u8 rsv[2];
+ __le32 len_thresh;
+ __le32 pkt_thresh;
+ } __packed req = {
+ .prot_idx = 1,
+ .band = 0,
+ .len_thresh = cpu_to_le32(val),
+ .pkt_thresh = cpu_to_le32(0x2),
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_PROTECT_CTRL,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+ struct req_data {
+ u8 number;
+ u8 rsv[3];
+ u8 queue;
+ u8 valid;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+ } __packed req = {
+ .number = 1,
+ .queue = queue,
+ .valid = WMM_AIFS_SET | WMM_TXOP_SET,
+ .aifs = params->aifs,
+ .txop = cpu_to_le16(params->txop),
+ };
+ struct sk_buff *skb;
+
+ if (params->cw_min) {
+ req.valid |= WMM_CW_MIN_SET;
+ req.cw_min = params->cw_min;
+ }
+ if (params->cw_max) {
+ req.valid |= WMM_CW_MAX_SET;
+ req.cw_max = cpu_to_le16(params->cw_max);
+ }
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_EDCA_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter)
+{
+#define ENTER_PM_STATE 1
+#define EXIT_PM_STATE 2
+ struct {
+ u8 pm_number;
+ u8 pm_state;
+ u8 bssid[ETH_ALEN];
+ u8 dtim_period;
+ u8 wlan_idx;
+ __le16 bcn_interval;
+ __le32 aid;
+ __le32 rx_filter;
+ u8 band_idx;
+ u8 rsv[3];
+ __le32 feature;
+ u8 omac_idx;
+ u8 wmm_idx;
+ u8 bcn_loss_cnt;
+ u8 bcn_sp_duration;
+ } __packed req = {
+ .pm_number = 5,
+ .pm_state = (enter) ? ENTER_PM_STATE : EXIT_PM_STATE,
+ .band_idx = 0,
+ };
+ struct sk_buff *skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_PM_STATE_CTRL,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+static int __mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
+ struct dev_info *dev_info)
+{
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed req_hdr = {0};
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed;
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) +
+ sizeof(struct req_tlv));
+ skb_reserve(skb, sizeof(req_hdr));
+
+ if (dev_info->feature & BIT(DEV_INFO_ACTIVE)) {
+ struct req_tlv req_tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(req_tlv)),
+ .active = dev_info->enable,
+ .band_idx = dev_info->band_idx,
+ };
+ memcpy(req_tlv.omac_addr, dev_info->omac_addr, ETH_ALEN);
+ memcpy(skb_put(skb, sizeof(req_tlv)), &req_tlv,
+ sizeof(req_tlv));
+ tlv_num++;
+ }
+
+ req_hdr.omac_idx = dev_info->omac_idx;
+ req_hdr.band_idx = dev_info->band_idx;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+ req_hdr.is_tlv_append = tlv_num ? 1 : 0;
+
+ memcpy(skb_push(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_DEV_INFO_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct dev_info dev_info = {0};
+
+ dev_info.omac_idx = mvif->omac_idx;
+ memcpy(dev_info.omac_addr, vif->addr, ETH_ALEN);
+ dev_info.band_idx = mvif->band_idx;
+ dev_info.enable = en;
+ dev_info.feature = BIT(DEV_INFO_ACTIVE);
+
+ return __mt7615_mcu_set_dev_info(dev, &dev_info);
+}
+
+static void bss_info_omac_handler(struct mt7615_dev *dev,
+ struct bss_info *bss_info,
+ struct sk_buff *skb)
+{
+ struct bss_info_omac tlv = {0};
+
+ tlv.tag = cpu_to_le16(BSS_INFO_OMAC);
+ tlv.len = cpu_to_le16(sizeof(tlv));
+ tlv.hw_bss_idx = (bss_info->omac_idx > EXT_BSSID_START) ?
+ HW_BSSID_0 : bss_info->omac_idx;
+ tlv.omac_idx = bss_info->omac_idx;
+ tlv.band_idx = bss_info->band_idx;
+ tlv.conn_type = cpu_to_le32(bss_info->conn_type);
+
+ memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+}
+
+static void bss_info_basic_handler(struct mt7615_dev *dev,
+ struct bss_info *bss_info,
+ struct sk_buff *skb)
+{
+ struct bss_info_basic tlv = {0};
+
+ tlv.tag = cpu_to_le16(BSS_INFO_BASIC);
+ tlv.len = cpu_to_le16(sizeof(tlv));
+ tlv.network_type = cpu_to_le32(bss_info->network_type);
+ tlv.active = bss_info->enable;
+ tlv.bcn_interval = cpu_to_le16(bss_info->bcn_interval);
+ memcpy(tlv.bssid, bss_info->bssid, ETH_ALEN);
+ tlv.wmm_idx = bss_info->wmm_idx;
+ tlv.dtim_period = bss_info->dtim_period;
+ tlv.bmc_tx_wlan_idx = bss_info->bmc_tx_wlan_idx;
+
+ memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+}
+
+static void bss_info_ext_bss_handler(struct mt7615_dev *dev,
+ struct bss_info *bss_info,
+ struct sk_buff *skb)
+{
+/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
+#define BCN_TX_ESTIMATE_TIME (4096 + 20)
+ struct bss_info_ext_bss tlv = {0};
+ int ext_bss_idx;
+
+ ext_bss_idx = bss_info->omac_idx - EXT_BSSID_START;
+
+ if (ext_bss_idx < 0)
+ return;
+
+ tlv.tag = cpu_to_le16(BSS_INFO_EXT_BSS);
+ tlv.len = cpu_to_le16(sizeof(tlv));
+ tlv.mbss_tsf_offset = ext_bss_idx * BCN_TX_ESTIMATE_TIME;
+
+ memcpy(skb_put(skb, sizeof(tlv)), &tlv, sizeof(tlv));
+}
+
+static struct bss_info_tag_handler bss_info_tag_handler[] = {
+ {BSS_INFO_OMAC, sizeof(struct bss_info_omac), bss_info_omac_handler},
+ {BSS_INFO_BASIC, sizeof(struct bss_info_basic), bss_info_basic_handler},
+ {BSS_INFO_RF_CH, sizeof(struct bss_info_rf_ch), NULL},
+ {BSS_INFO_PM, 0, NULL},
+ {BSS_INFO_UAPSD, 0, NULL},
+ {BSS_INFO_ROAM_DETECTION, 0, NULL},
+ {BSS_INFO_LQ_RM, 0, NULL},
+ {BSS_INFO_EXT_BSS, sizeof(struct bss_info_ext_bss), bss_info_ext_bss_handler},
+ {BSS_INFO_BMC_INFO, 0, NULL},
+ {BSS_INFO_SYNC_MODE, 0, NULL},
+ {BSS_INFO_RA, 0, NULL},
+ {BSS_INFO_MAX_NUM, 0, NULL},
+};
+
+static int __mt7615_mcu_set_bss_info(struct mt7615_dev *dev,
+ struct bss_info *bss_info)
+{
+ struct req_hdr {
+ u8 bss_idx;
+ u8 rsv0;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv1[3];
+ } __packed req_hdr = {0};
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+ u32 size = 0;
+ int i;
+
+ for (i = 0; i < BSS_INFO_MAX_NUM; i++)
+ if ((BIT(bss_info_tag_handler[i].tag) & bss_info->feature) &&
+ bss_info_tag_handler[i].handler) {
+ tlv_num++;
+ size += bss_info_tag_handler[i].len;
+ }
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + size);
+
+ req_hdr.bss_idx = bss_info->bss_idx;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+ req_hdr.is_tlv_append = tlv_num ? 1 : 0;
+
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ for (i = 0; i < BSS_INFO_MAX_NUM; i++)
+ if ((BIT(bss_info_tag_handler[i].tag) & bss_info->feature) &&
+ bss_info_tag_handler[i].handler)
+ bss_info_tag_handler[i].handler(dev, bss_info, skb);
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BSS_INFO_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+static void bss_info_convert_vif_type(enum nl80211_iftype type,
+ u32 *network_type, u32 *conn_type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ if (network_type)
+ *network_type = NETWORK_INFRA;
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_AP;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (network_type)
+ *network_type = NETWORK_INFRA;
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_STA;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+	}
+}
+
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct bss_info bss_info = {0};
+ u8 bmc_tx_wlan_idx = 0;
+ u32 network_type = 0, conn_type = 0;
+
+ if (vif->type == NL80211_IFTYPE_AP) {
+ bmc_tx_wlan_idx = mvif->sta.wcid.idx;
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ /* find the unicast entry for sta mode bmc tx */
+ struct ieee80211_sta *ap_sta;
+ struct mt7615_sta *msta;
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (!ap_sta) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ msta = (struct mt7615_sta *)ap_sta->drv_priv;
+ bmc_tx_wlan_idx = msta->wcid.idx;
+
+ rcu_read_unlock();
+ } else {
+ WARN_ON(1);
+ }
+
+ bss_info_convert_vif_type(vif->type, &network_type, &conn_type);
+
+ bss_info.bss_idx = mvif->idx;
+ memcpy(bss_info.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss_info.omac_idx = mvif->omac_idx;
+ bss_info.band_idx = mvif->band_idx;
+ bss_info.bmc_tx_wlan_idx = bmc_tx_wlan_idx;
+ bss_info.wmm_idx = mvif->wmm_idx;
+ bss_info.network_type = network_type;
+ bss_info.conn_type = conn_type;
+ bss_info.bcn_interval = vif->bss_conf.beacon_int;
+ bss_info.dtim_period = vif->bss_conf.dtim_period;
+ bss_info.enable = en;
+ bss_info.feature = BIT(BSS_INFO_BASIC);
+ if (en) {
+ bss_info.feature |= BIT(BSS_INFO_OMAC);
+ if (mvif->omac_idx > EXT_BSSID_START)
+ bss_info.feature |= BIT(BSS_INFO_EXT_BSS);
+ }
+
+ return __mt7615_mcu_set_bss_info(dev, &bss_info);
+}
+
+static int __mt7615_mcu_set_wtbl(struct mt7615_dev *dev, int wlan_idx,
+ int operation, void *buf, int buf_len)
+{
+ struct req_hdr {
+ u8 wlan_idx;
+ u8 operation;
+ __le16 tlv_num;
+ u8 rsv[4];
+ } __packed req_hdr = {0};
+ struct tlv {
+ __le16 tag;
+ __le16 len;
+ u8 buf[0];
+ } __packed;
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+ int offset = 0;
+
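+	/* count the TLVs in the buffer; each TLV's len field covers the
+	 * full TLV including the tag/len header
+	 */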
+ while (offset < buf_len) {
+ struct tlv *tlv = (struct tlv *)((u8 *)buf + offset);
+
+ tlv_num++;
+ offset += tlv->len;
+ }
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + buf_len);
+
+ req_hdr.wlan_idx = wlan_idx;
+ req_hdr.operation = operation;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ if (buf && buf_len)
+ memcpy(skb_put(skb, buf_len), buf, buf_len);
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_WTBL_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+static enum mt7615_cipher_type
+mt7615_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
+{
+ if (!key || key->keylen > 32)
+ return MT_CIPHER_NONE;
+
+ memcpy(key_data, key->key, key->keylen);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return MT_CIPHER_WEP40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return MT_CIPHER_WEP104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ /* Rx/Tx MIC keys are swapped */
+ memcpy(key_data + 16, key->key + 24, 8);
+ memcpy(key_data + 24, key->key + 16, 8);
+ return MT_CIPHER_TKIP;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return MT_CIPHER_AES_CCMP;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return MT_CIPHER_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return MT_CIPHER_GCMP;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return MT_CIPHER_GCMP_256;
+ case WLAN_CIPHER_SUITE_SMS4:
+ return MT_CIPHER_WAPI;
+ default:
+ return MT_CIPHER_NONE;
+ }
+}
+
+int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd)
+{
+ struct wtbl_sec_key wtbl_sec_key = {0};
+ int buf_len = sizeof(struct wtbl_sec_key);
+ u8 cipher;
+
+ wtbl_sec_key.tag = cpu_to_le16(WTBL_SEC_KEY);
+ wtbl_sec_key.len = cpu_to_le16(buf_len);
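+	/* enum set_key_cmd matches the firmware's add field:
+	 * SET_KEY (0) installs the key, DISABLE_KEY (1) removes it
+	 */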
+ wtbl_sec_key.add = cmd;
+
+ if (cmd == SET_KEY) {
+ cipher = mt7615_get_key_info(key, wtbl_sec_key.key_material);
+ if (cipher == MT_CIPHER_NONE && key)
+ return -EOPNOTSUPP;
+
+ wtbl_sec_key.cipher_id = cipher;
+ wtbl_sec_key.key_id = key->keyidx;
+ wtbl_sec_key.key_len = key->keylen;
+ } else {
+ wtbl_sec_key.key_len = sizeof(wtbl_sec_key.key_material);
+ }
+
+ return __mt7615_mcu_set_wtbl(dev, wcid, WTBL_SET, &wtbl_sec_key,
+ buf_len);
+}
+
+int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_generic *wtbl_generic;
+ struct wtbl_rx *wtbl_rx;
+ int buf_len, ret;
+ u8 *buf;
+
+ buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wtbl_generic = (struct wtbl_generic *)buf;
+ buf_len = sizeof(*wtbl_generic);
+ wtbl_generic->tag = cpu_to_le16(WTBL_GENERIC);
+ wtbl_generic->len = cpu_to_le16(buf_len);
+ eth_broadcast_addr(wtbl_generic->peer_addr);
+ wtbl_generic->muar_idx = 0xe;
+
+ wtbl_rx = (struct wtbl_rx *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_rx);
+ wtbl_rx->tag = cpu_to_le16(WTBL_RX);
+ wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
+ wtbl_rx->rca1 = 1;
+ wtbl_rx->rca2 = 1;
+ wtbl_rx->rv = 1;
+
+ ret = __mt7615_mcu_set_wtbl(dev, mvif->sta.wcid.idx,
+ WTBL_RESET_AND_SET, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+}
+
+int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+
+ return __mt7615_mcu_set_wtbl(dev, mvif->sta.wcid.idx,
+ WTBL_RESET_AND_SET, NULL, 0);
+}
+
+int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct wtbl_generic *wtbl_generic;
+ struct wtbl_rx *wtbl_rx;
+ int buf_len, ret;
+ u8 *buf;
+
+ buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ wtbl_generic = (struct wtbl_generic *)buf;
+ buf_len = sizeof(*wtbl_generic);
+ wtbl_generic->tag = cpu_to_le16(WTBL_GENERIC);
+ wtbl_generic->len = cpu_to_le16(buf_len);
+ memcpy(wtbl_generic->peer_addr, sta->addr, ETH_ALEN);
+ wtbl_generic->muar_idx = mvif->omac_idx;
+ wtbl_generic->qos = sta->wme;
+ wtbl_generic->partial_aid = cpu_to_le16(sta->aid);
+
+ wtbl_rx = (struct wtbl_rx *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_rx);
+ wtbl_rx->tag = cpu_to_le16(WTBL_RX);
+ wtbl_rx->len = cpu_to_le16(sizeof(*wtbl_rx));
+ wtbl_rx->rca1 = (vif->type == NL80211_IFTYPE_AP) ? 0 : 1;
+ wtbl_rx->rca2 = 1;
+ wtbl_rx->rv = 1;
+
+ ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx,
+ WTBL_RESET_AND_SET, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+}
+
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+
+ return __mt7615_mcu_set_wtbl(dev, msta->wcid.idx,
+ WTBL_RESET_AND_SET, NULL, 0);
+}
+
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev)
+{
+ return __mt7615_mcu_set_wtbl(dev, 0, WTBL_RESET_ALL, NULL, 0);
+}
+
+static int __mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, int bss_idx,
+ int wlan_idx, int muar_idx, void *buf,
+ int buf_len)
+{
+ struct req_hdr {
+ u8 bss_idx;
+ u8 wlan_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 muar_idx;
+ u8 rsv[2];
+ } __packed req_hdr = {0};
+ struct tlv {
+ __le16 tag;
+ __le16 len;
+ u8 buf[0];
+ } __packed;
+ struct sk_buff *skb;
+ u16 tlv_num = 0;
+ int offset = 0;
+
+ while (offset < buf_len) {
+ struct tlv *tlv = (struct tlv *)((u8 *)buf + offset);
+
+ tlv_num++;
+ offset += tlv->len;
+ }
+
+ skb = mt7615_mcu_msg_alloc(NULL, sizeof(req_hdr) + buf_len);
+
+ req_hdr.bss_idx = bss_idx;
+ req_hdr.wlan_idx = wlan_idx;
+ req_hdr.tlv_num = cpu_to_le16(tlv_num);
+ req_hdr.is_tlv_append = tlv_num ? 1 : 0;
+ req_hdr.muar_idx = muar_idx;
+
+ memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr));
+
+ if (buf && buf_len)
+ memcpy(skb_put(skb, buf_len), buf, buf_len);
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_STA_REC_UPDATE,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct sta_rec_basic sta_rec_basic = {0};
+ int buf_len = sizeof(struct sta_rec_basic);
+
+ sta_rec_basic.tag = cpu_to_le16(STA_REC_BASIC);
+ sta_rec_basic.len = cpu_to_le16(buf_len);
+ sta_rec_basic.conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
+ eth_broadcast_addr(sta_rec_basic.peer_addr);
+ if (en) {
+ sta_rec_basic.conn_state = CONN_STATE_PORT_SECURE;
+ sta_rec_basic.extra_info =
+ cpu_to_le16(EXTRA_INFO_VER | EXTRA_INFO_NEW);
+ } else {
+ sta_rec_basic.conn_state = CONN_STATE_DISCONNECT;
+ sta_rec_basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ }
+
+ return __mt7615_mcu_set_sta_rec(dev, mvif->idx, mvif->sta.wcid.idx,
+ mvif->omac_idx, &sta_rec_basic,
+ buf_len);
+}
+
+static void sta_rec_convert_vif_type(enum nl80211_iftype type, u32 *conn_type)
+{
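+	/* the sta_rec connection type describes the peer: an AP vif's
+	 * peers are stations, a station vif's peer is the AP
+	 */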
+ switch (type) {
+ case NL80211_IFTYPE_AP:
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_STA;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (conn_type)
+ *conn_type = CONNECTION_INFRA_AP;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+	}
+}
+
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en)
+{
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct sta_rec_basic sta_rec_basic = {0};
+ int buf_len = sizeof(struct sta_rec_basic);
+ u32 conn_type = 0;
+
+ sta_rec_convert_vif_type(vif->type, &conn_type);
+
+ sta_rec_basic.tag = cpu_to_le16(STA_REC_BASIC);
+ sta_rec_basic.len = cpu_to_le16(buf_len);
+ sta_rec_basic.conn_type = cpu_to_le32(conn_type);
+ sta_rec_basic.qos = sta->wme;
+ sta_rec_basic.aid = cpu_to_le16(sta->aid);
+ memcpy(sta_rec_basic.peer_addr, sta->addr, ETH_ALEN);
+
+ if (en) {
+ sta_rec_basic.conn_state = CONN_STATE_PORT_SECURE;
+ sta_rec_basic.extra_info =
+ cpu_to_le16(EXTRA_INFO_VER | EXTRA_INFO_NEW);
+ } else {
+ sta_rec_basic.conn_state = CONN_STATE_DISCONNECT;
+ sta_rec_basic.extra_info = cpu_to_le16(EXTRA_INFO_VER);
+ }
+
+ return __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, &sta_rec_basic,
+ buf_len);
+}
+
+int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en)
+{
+ struct req {
+ u8 omac_idx;
+ u8 enable;
+ u8 wlan_idx;
+ u8 band_idx;
+ u8 pkt_type;
+ u8 need_pre_tbtt_int;
+ __le16 csa_ie_pos;
+ __le16 pkt_len;
+ __le16 tim_ie_pos;
+ u8 pkt[512];
+ u8 csa_cnt;
+ /* bss color change */
+ u8 bcc_cnt;
+ __le16 bcc_ie_pos;
+ } __packed req = {0};
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct sk_buff *skb;
+ u16 tim_off, tim_len;
+
+ skb = ieee80211_beacon_get_tim(mt76_hw(dev), vif, &tim_off, &tim_len);
+
+ if (!skb)
+ return -EINVAL;
+
+ if (skb->len > 512 - MT_TXD_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ mt7615_mac_write_txwi(dev, (__le32 *)(req.pkt), skb, wcid, NULL,
+ 0, NULL);
+	memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
+
+	req.omac_idx = mvif->omac_idx;
+	req.enable = en;
+	req.wlan_idx = wcid->idx;
+	req.band_idx = mvif->band_idx;
+	/* pkt_type: 0 for bcn, 1 for tim */
+	req.pkt_type = 0;
+	req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+	req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + tim_off);
+	dev_kfree_skb(skb);
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_BCN_OFFLOAD,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_channel(struct mt7615_dev *dev)
+{
+ struct cfg80211_chan_def *chdef = &dev->mt76.chandef;
+ struct {
+ u8 control_chan;
+ u8 center_chan;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams_mask;
+ u8 switch_reason;
+ u8 band_idx;
+ /* for 80+80 only */
+ u8 center_chan2;
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 rsv1[3];
+ u8 txpower_sku[53];
+ u8 rsv2[3];
+ } req = {0};
+ struct sk_buff *skb;
+ int ret;
+
+ req.control_chan = chdef->chan->hw_value;
+ req.center_chan = ieee80211_frequency_to_channel(chdef->center_freq1);
+ req.tx_streams = (dev->mt76.chainmask >> 8) & 0xf;
+ req.rx_streams_mask = dev->mt76.antenna_mask;
+ req.switch_reason = CH_SWITCH_NORMAL;
+ req.band_idx = 0;
+ req.center_chan2 = ieee80211_frequency_to_channel(chdef->center_freq2);
+ req.txpower_drop = 0;
+
+ switch (dev->mt76.chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ req.bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ req.bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ req.bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ req.bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ req.bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ req.bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ req.bw = CMD_CBW_20MHZ;
+ }
+
+ memset(req.txpower_sku, 0x3f, 49);
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ ret = mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+ if (ret)
+ return ret;
+
+ skb = mt7615_mcu_msg_alloc(&req, sizeof(req));
+ return mt7615_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_RX_PATH,
+ MCU_Q_SET, MCU_S2D_H2N, NULL);
+}
+
+int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
+ struct wtbl_ht *wtbl_ht;
+ struct wtbl_raw *wtbl_raw;
+ struct sta_rec_ht *sta_rec_ht;
+ int buf_len, ret;
+ u32 msk, val = 0;
+ u8 *buf;
+
+ buf = kzalloc(MT7615_WTBL_UPDATE_MAX_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* ht basic */
+ buf_len = sizeof(*wtbl_ht);
+ wtbl_ht = (struct wtbl_ht *)buf;
+ wtbl_ht->tag = cpu_to_le16(WTBL_HT);
+ wtbl_ht->len = cpu_to_le16(sizeof(*wtbl_ht));
+ wtbl_ht->ht = 1;
+ wtbl_ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ wtbl_ht->af = sta->ht_cap.ampdu_factor;
+ wtbl_ht->mm = sta->ht_cap.ampdu_density;
+
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ val |= MT_WTBL_W5_SHORT_GI_20;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ val |= MT_WTBL_W5_SHORT_GI_40;
+
+ /* vht basic */
+ if (sta->vht_cap.vht_supported) {
+ struct wtbl_vht *wtbl_vht;
+
+ wtbl_vht = (struct wtbl_vht *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_vht);
+ wtbl_vht->tag = cpu_to_le16(WTBL_VHT);
+ wtbl_vht->len = cpu_to_le16(sizeof(*wtbl_vht));
+ wtbl_vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC;
+ wtbl_vht->vht = 1;
+
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ val |= MT_WTBL_W5_SHORT_GI_80;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ val |= MT_WTBL_W5_SHORT_GI_160;
+ }
+
+ /* smps */
+ if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC) {
+ struct wtbl_smps *wtbl_smps;
+
+ wtbl_smps = (struct wtbl_smps *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_smps);
+ wtbl_smps->tag = cpu_to_le16(WTBL_SMPS);
+ wtbl_smps->len = cpu_to_le16(sizeof(*wtbl_smps));
+ wtbl_smps->smps = 1;
+ }
+
+ /* sgi */
+ msk = MT_WTBL_W5_SHORT_GI_20 | MT_WTBL_W5_SHORT_GI_40 |
+ MT_WTBL_W5_SHORT_GI_80 | MT_WTBL_W5_SHORT_GI_160;
+
+ wtbl_raw = (struct wtbl_raw *)(buf + buf_len);
+ buf_len += sizeof(*wtbl_raw);
+ wtbl_raw->tag = cpu_to_le16(WTBL_RAW_DATA);
+ wtbl_raw->len = cpu_to_le16(sizeof(*wtbl_raw));
+ wtbl_raw->wtbl_idx = 1;
+ wtbl_raw->dw = 5;
+ wtbl_raw->msk = cpu_to_le32(~msk);
+ wtbl_raw->val = cpu_to_le32(val);
+
+ ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET, buf,
+ buf_len);
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+
+ memset(buf, 0, MT7615_WTBL_UPDATE_MAX_SIZE);
+
+ buf_len = sizeof(*sta_rec_ht);
+ sta_rec_ht = (struct sta_rec_ht *)buf;
+ sta_rec_ht->tag = cpu_to_le16(STA_REC_HT);
+ sta_rec_ht->len = cpu_to_le16(sizeof(*sta_rec_ht));
+ sta_rec_ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+
+ if (sta->vht_cap.vht_supported) {
+ struct sta_rec_vht *sta_rec_vht;
+
+ sta_rec_vht = (struct sta_rec_vht *)(buf + buf_len);
+ buf_len += sizeof(*sta_rec_vht);
+ sta_rec_vht->tag = cpu_to_le16(STA_REC_VHT);
+ sta_rec_vht->len = cpu_to_le16(sizeof(*sta_rec_vht));
+ sta_rec_vht->vht_cap = cpu_to_le32(sta->vht_cap.cap);
+ sta_rec_vht->vht_rx_mcs_map =
+ cpu_to_le16(sta->vht_cap.vht_mcs.rx_mcs_map);
+ sta_rec_vht->vht_tx_mcs_map =
+ cpu_to_le16(sta->vht_cap.vht_mcs.tx_mcs_map);
+ }
+
+ ret = __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, buf, buf_len);
+ kfree(buf);
+ return ret;
+}
+
+int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add)
+{
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ u8 ba_range[8] = {4, 8, 12, 24, 36, 48, 54, 64};
+ u16 tid = params->tid;
+ u16 ba_size = params->buf_size;
+ u16 ssn = params->ssn;
+ struct wtbl_ba wtbl_ba = {0};
+ struct sta_rec_ba sta_rec_ba = {0};
+ int ret, buf_len;
+
+ buf_len = sizeof(struct wtbl_ba);
+
+ wtbl_ba.tag = cpu_to_le16(WTBL_BA);
+ wtbl_ba.len = cpu_to_le16(buf_len);
+ wtbl_ba.tid = tid;
+ wtbl_ba.ba_type = MT_BA_TYPE_ORIGINATOR;
+
+ if (add) {
+ u8 idx;
+
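+		/* pick the largest hw window index that does not exceed
+		 * the negotiated block-ack window size
+		 */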
+ for (idx = 7; idx > 0; idx--) {
+ if (ba_size >= ba_range[idx])
+ break;
+ }
+
+ wtbl_ba.sn = cpu_to_le16(ssn);
+ wtbl_ba.ba_en = 1;
+ wtbl_ba.ba_winsize_idx = idx;
+ }
+
+ ret = __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET, &wtbl_ba,
+ buf_len);
+ if (ret)
+ return ret;
+
+ buf_len = sizeof(struct sta_rec_ba);
+
+ sta_rec_ba.tag = cpu_to_le16(STA_REC_BA);
+ sta_rec_ba.len = cpu_to_le16(buf_len);
+ sta_rec_ba.tid = tid;
+ sta_rec_ba.ba_type = MT_BA_TYPE_ORIGINATOR;
+ sta_rec_ba.amsdu = params->amsdu;
+ sta_rec_ba.ba_en = add << tid;
+ sta_rec_ba.ssn = cpu_to_le16(ssn);
+ sta_rec_ba.winsize = cpu_to_le16(ba_size);
+
+ return __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, &sta_rec_ba, buf_len);
+}
+
+int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add)
+{
+ struct ieee80211_sta *sta = params->sta;
+ struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv;
+ struct mt7615_vif *mvif = msta->vif;
+ u16 tid = params->tid;
+ struct wtbl_ba wtbl_ba = {0};
+ struct sta_rec_ba sta_rec_ba = {0};
+ int ret, buf_len;
+
+ buf_len = sizeof(struct sta_rec_ba);
+
+ sta_rec_ba.tag = cpu_to_le16(STA_REC_BA);
+ sta_rec_ba.len = cpu_to_le16(buf_len);
+ sta_rec_ba.tid = tid;
+ sta_rec_ba.ba_type = MT_BA_TYPE_RECIPIENT;
+ sta_rec_ba.amsdu = params->amsdu;
+ sta_rec_ba.ba_en = add << tid;
+ sta_rec_ba.ssn = cpu_to_le16(params->ssn);
+ sta_rec_ba.winsize = cpu_to_le16(params->buf_size);
+
+ ret = __mt7615_mcu_set_sta_rec(dev, mvif->idx, msta->wcid.idx,
+ mvif->omac_idx, &sta_rec_ba, buf_len);
+ if (ret || !add)
+ return ret;
+
+ buf_len = sizeof(struct wtbl_ba);
+
+ wtbl_ba.tag = cpu_to_le16(WTBL_BA);
+ wtbl_ba.len = cpu_to_le16(buf_len);
+ wtbl_ba.tid = tid;
+ wtbl_ba.ba_type = MT_BA_TYPE_RECIPIENT;
+ memcpy(wtbl_ba.peer_addr, sta->addr, ETH_ALEN);
+ wtbl_ba.rst_ba_tid = tid;
+ wtbl_ba.rst_ba_sel = RST_BA_MAC_TID_MATCH;
+ wtbl_ba.rst_ba_sb = 1;
+
+ return __mt7615_mcu_set_wtbl(dev, msta->wcid.idx, WTBL_SET,
+ &wtbl_ba, buf_len);
+}
+
+void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates)
+{
+ int wcid = sta->wcid.idx;
+ u32 addr = MT_WTBL_BASE + wcid * MT_WTBL_ENTRY_SIZE;
+ bool stbc = false;
+ int n_rates = sta->n_rates;
+ u8 bw, bw_prev, bw_idx = 0;
+ u16 val[4];
+ u16 probe_val;
+ u32 w5, w27;
+ int i;
+
+ if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
+ return;
+
+ for (i = n_rates; i < 4; i++)
+ rates[i] = rates[n_rates - 1];
+
+ val[0] = mt7615_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
+ bw_prev = bw;
+
+ if (probe_rate) {
+ probe_val = mt7615_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
+ if (bw)
+ bw_idx = 1;
+ else
+ bw_prev = 0;
+ } else {
+ probe_val = val[0];
+ }
+
+ val[1] = mt7615_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 3;
+ bw_prev = bw;
+ }
+
+ val[2] = mt7615_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
+ if (bw_prev) {
+ bw_idx = 5;
+ bw_prev = bw;
+ }
+
+ val[3] = mt7615_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
+ if (bw_prev)
+ bw_idx = 7;
+
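+	/* rebuild WTBL DW5/DW27 with the new bandwidth info and stage the
+	 * packed rate words through the RIUCR registers before triggering
+	 * the WTBL rate update
+	 */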
+ w27 = mt76_rr(dev, addr + 27 * 4);
+ w27 &= ~MT_WTBL_W27_CC_BW_SEL;
+ w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, bw);
+
+ w5 = mt76_rr(dev, addr + 5 * 4);
+ w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE);
+ w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, bw) |
+ FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, bw_idx ? bw_idx - 1 : 7);
+
+ mt76_wr(dev, MT_WTBL_RIUCR0, w5);
+
+ mt76_wr(dev, MT_WTBL_RIUCR1,
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
+ FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR2,
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
+ FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
+
+ mt76_wr(dev, MT_WTBL_RIUCR3,
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
+ FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
+
+ mt76_wr(dev, MT_WTBL_UPDATE,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
+ MT_WTBL_UPDATE_RATE_UPDATE |
+ MT_WTBL_UPDATE_TX_COUNT_CLEAR);
+
+ mt76_wr(dev, addr + 27 * 4, w27);
+
+ if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET))
+ mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
+
+ sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates;
+ sta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
new file mode 100644
index 000000000000..9455f8fa475d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_MCU_H
+#define __MT7615_MCU_H
+
+struct mt7615_mcu_txd {
+ __le32 txd[8];
+
+ __le16 len;
+ __le16 pq_id;
+
+ u8 cid;
+ u8 pkt_type;
+	u8 set_query; /* firmware doesn't care */
+ u8 seq;
+
+ u8 uc_d2b0_rev;
+ u8 ext_cid;
+ u8 s2d_index;
+ u8 ext_cid_ack;
+
+ u32 reserved[5];
+} __packed __aligned(4);
+
+struct mt7615_mcu_rxd {
+ __le32 rxd[4];
+
+ __le16 len;
+ __le16 pkt_type_id;
+
+ u8 eid;
+ u8 seq;
+ __le16 __rsv;
+
+ u8 ext_eid;
+ u8 __rsv1[2];
+ u8 s2d_index;
+};
+
+#define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
+#define MCU_PKT_ID 0xa0
+
+enum {
+ MCU_Q_QUERY,
+ MCU_Q_SET,
+ MCU_Q_RESERVED,
+ MCU_Q_NA
+};
+
+enum {
+ MCU_S2D_H2N,
+ MCU_S2D_C2N,
+ MCU_S2D_H2C,
+ MCU_S2D_H2CN
+};
+
+enum {
+ MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
+ MCU_CMD_FW_START_REQ = 0x02,
+ MCU_CMD_INIT_ACCESS_REG = 0x3,
+ MCU_CMD_PATCH_START_REQ = 0x05,
+ MCU_CMD_PATCH_FINISH_REQ = 0x07,
+ MCU_CMD_PATCH_SEM_CONTROL = 0x10,
+ MCU_CMD_EXT_CID = 0xED,
+ MCU_CMD_FW_SCATTER = 0xEE,
+ MCU_CMD_RESTART_DL_REQ = 0xEF,
+};
+
+enum {
+ MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
+ MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
+ MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
+ MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
+ MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
+ MCU_EXT_CMD_EDCA_UPDATE = 0x27,
+ MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A,
+ MCU_EXT_CMD_WTBL_UPDATE = 0x32,
+ MCU_EXT_CMD_PROTECT_CTRL = 0x3e,
+ MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
+ MCU_EXT_CMD_BCN_OFFLOAD = 0x49,
+ MCU_EXT_CMD_SET_RX_PATH = 0x4e,
+};
+
+enum {
+ PATCH_SEM_RELEASE = 0x0,
+ PATCH_SEM_GET = 0x1
+};
+
+enum {
+ PATCH_NOT_DL_SEM_FAIL = 0x0,
+ PATCH_IS_DL = 0x1,
+ PATCH_NOT_DL_SEM_SUCCESS = 0x2,
+ PATCH_REL_SEM_SUCCESS = 0x3
+};
+
+enum {
+ FW_STATE_INITIAL = 0,
+ FW_STATE_FW_DOWNLOAD = 1,
+ FW_STATE_NORMAL_OPERATION = 2,
+ FW_STATE_NORMAL_TRX = 3,
+ FW_STATE_CR4_RDY = 7
+};
+
+#define STA_TYPE_STA BIT(0)
+#define STA_TYPE_AP BIT(1)
+#define STA_TYPE_ADHOC BIT(2)
+#define STA_TYPE_TDLS BIT(3)
+#define STA_TYPE_WDS BIT(4)
+#define STA_TYPE_BC BIT(5)
+
+#define NETWORK_INFRA BIT(16)
+#define NETWORK_P2P BIT(17)
+#define NETWORK_IBSS BIT(18)
+#define NETWORK_MESH BIT(19)
+#define NETWORK_BOW BIT(20)
+#define NETWORK_WDS BIT(21)
+
+#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
+#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
+#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
+#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
+#define CONNECTION_MESH_STA (STA_TYPE_STA | NETWORK_MESH)
+#define CONNECTION_MESH_AP (STA_TYPE_AP | NETWORK_MESH)
+#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
+#define CONNECTION_TDLS (STA_TYPE_STA | NETWORK_INFRA | STA_TYPE_TDLS)
+#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
+#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
+
+#define CONN_STATE_DISCONNECT 0
+#define CONN_STATE_CONNECT 1
+#define CONN_STATE_PORT_SECURE 2
+
+struct dev_info {
+ u8 omac_idx;
+ u8 omac_addr[ETH_ALEN];
+ u8 band_idx;
+ u8 enable;
+ u32 feature;
+};
+
+enum {
+ DEV_INFO_ACTIVE,
+ DEV_INFO_MAX_NUM
+};
+
+struct bss_info {
+ u8 bss_idx;
+ u8 bssid[ETH_ALEN];
+ u8 omac_idx;
+ u8 band_idx;
+	u8 bmc_tx_wlan_idx; /* for bmc tx (sta mode uses the uc entry) */
+ u8 wmm_idx;
+ u32 network_type;
+ u32 conn_type;
+ u16 bcn_interval;
+ u8 dtim_period;
+ u8 enable;
+ u32 feature;
+};
+
+struct bss_info_tag_handler {
+ u32 tag;
+ u32 len;
+ void (*handler)(struct mt7615_dev *dev,
+ struct bss_info *bss_info, struct sk_buff *skb);
+};
+
+struct bss_info_omac {
+ __le16 tag;
+ __le16 len;
+ u8 hw_bss_idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 rsv0;
+ __le32 conn_type;
+ u32 rsv1;
+} __packed;
+
+struct bss_info_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 network_type;
+ u8 active;
+ u8 rsv0;
+ __le16 bcn_interval;
+ u8 bssid[ETH_ALEN];
+ u8 wmm_idx;
+ u8 dtim_period;
+ u8 bmc_tx_wlan_idx;
+ u8 cipher; /* not used */
+ u8 phymode; /* not used */
+ u8 rsv1[5];
+} __packed;
+
+struct bss_info_rf_ch {
+ __le16 tag;
+ __le16 len;
+ u8 pri_ch;
+ u8 central_ch0;
+ u8 central_ch1;
+ u8 bw;
+} __packed;
+
+struct bss_info_ext_bss {
+ __le16 tag;
+ __le16 len;
+ __le32 mbss_tsf_offset; /* in unit of us */
+ u8 rsv[8];
+} __packed;
+
+enum {
+ BSS_INFO_OMAC,
+ BSS_INFO_BASIC,
+ BSS_INFO_RF_CH, /* optional, for BT/LTE coex */
+ BSS_INFO_PM, /* sta only */
+ BSS_INFO_UAPSD, /* sta only */
+ BSS_INFO_ROAM_DETECTION, /* obsoleted */
+ BSS_INFO_LQ_RM, /* obsoleted */
+ BSS_INFO_EXT_BSS,
+ BSS_INFO_BMC_INFO, /* for bmc rate control in CR4 */
+ BSS_INFO_SYNC_MODE, /* obsoleted */
+ BSS_INFO_RA,
+ BSS_INFO_MAX_NUM
+};
+
+enum {
+ WTBL_RESET_AND_SET = 1,
+ WTBL_SET,
+ WTBL_QUERY,
+ WTBL_RESET_ALL
+};
+
+struct wtbl_generic {
+ __le16 tag;
+ __le16 len;
+ u8 peer_addr[ETH_ALEN];
+ u8 muar_idx;
+ u8 skip_tx;
+ u8 cf_ack;
+ u8 qos;
+ u8 mesh;
+ u8 adm;
+ __le16 partial_aid;
+ u8 baf_en;
+ u8 aad_om;
+} __packed;
+
+struct wtbl_rx {
+ __le16 tag;
+ __le16 len;
+ u8 rcid;
+ u8 rca1;
+ u8 rca2;
+ u8 rv;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_ht {
+ __le16 tag;
+ __le16 len;
+ u8 ht;
+ u8 ldpc;
+ u8 af;
+ u8 mm;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_vht {
+ __le16 tag;
+ __le16 len;
+ u8 ldpc;
+ u8 dyn_bw;
+ u8 vht;
+ u8 txop_ps;
+ u8 rsv[4];
+} __packed;
+
+struct wtbl_tx_ps {
+ __le16 tag;
+ __le16 len;
+ u8 txps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_hdr_trans {
+ __le16 tag;
+ __le16 len;
+ u8 to_ds;
+ u8 from_ds;
+ u8 disable_rx_trans;
+ u8 rsv;
+} __packed;
+
+enum mt7615_cipher_type {
+ MT_CIPHER_NONE,
+ MT_CIPHER_WEP40,
+ MT_CIPHER_TKIP,
+ MT_CIPHER_TKIP_NO_MIC,
+ MT_CIPHER_AES_CCMP,
+ MT_CIPHER_WEP104,
+ MT_CIPHER_BIP_CMAC_128,
+ MT_CIPHER_WEP128,
+ MT_CIPHER_WAPI,
+ MT_CIPHER_CCMP_256 = 10,
+ MT_CIPHER_GCMP,
+ MT_CIPHER_GCMP_256,
+};
+
+struct wtbl_sec_key {
+ __le16 tag;
+ __le16 len;
+ u8 add; /* 0: add, 1: remove */
+ u8 rkv;
+ u8 ikv;
+ u8 cipher_id;
+ u8 key_id;
+ u8 key_len;
+ u8 rsv[2];
+ u8 key_material[32];
+} __packed;
+
+enum {
+ MT_BA_TYPE_INVALID,
+ MT_BA_TYPE_ORIGINATOR,
+ MT_BA_TYPE_RECIPIENT
+};
+
+enum {
+ RST_BA_MAC_TID_MATCH,
+ RST_BA_MAC_MATCH,
+ RST_BA_NO_MATCH
+};
+
+struct wtbl_ba {
+ __le16 tag;
+ __le16 len;
+ /* common */
+ u8 tid;
+ u8 ba_type;
+ u8 rsv0[2];
+ /* originator only */
+ __le16 sn;
+ u8 ba_en;
+ u8 ba_winsize_idx;
+ __le16 ba_winsize;
+ /* recipient only */
+ u8 peer_addr[ETH_ALEN];
+ u8 rst_ba_tid;
+ u8 rst_ba_sel;
+ u8 rst_ba_sb;
+ u8 band_idx;
+ u8 rsv1[4];
+} __packed;
+
+struct wtbl_bf {
+ __le16 tag;
+ __le16 len;
+ u8 ibf;
+ u8 ebf;
+ u8 ibf_vht;
+ u8 ebf_vht;
+ u8 gid;
+ u8 pfmu_idx;
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_smps {
+ __le16 tag;
+ __le16 len;
+ u8 smps;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_pn {
+ __le16 tag;
+ __le16 len;
+ u8 pn[6];
+ u8 rsv[2];
+} __packed;
+
+struct wtbl_spe {
+ __le16 tag;
+ __le16 len;
+ u8 spe_idx;
+ u8 rsv[3];
+} __packed;
+
+struct wtbl_raw {
+ __le16 tag;
+ __le16 len;
+ u8 wtbl_idx;
+ u8 dw;
+ u8 rsv[2];
+ __le32 msk;
+ __le32 val;
+} __packed;
+
+#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_generic) + \
+ sizeof(struct wtbl_rx) + \
+ sizeof(struct wtbl_ht) + \
+ sizeof(struct wtbl_vht) + \
+ sizeof(struct wtbl_tx_ps) + \
+ sizeof(struct wtbl_hdr_trans) + \
+ sizeof(struct wtbl_sec_key) + \
+ sizeof(struct wtbl_ba) + \
+ sizeof(struct wtbl_bf) + \
+ sizeof(struct wtbl_smps) + \
+ sizeof(struct wtbl_pn) + \
+ sizeof(struct wtbl_spe))
+
+enum {
+ WTBL_GENERIC,
+ WTBL_RX,
+ WTBL_HT,
+ WTBL_VHT,
+ WTBL_PEER_PS, /* not used */
+ WTBL_TX_PS,
+ WTBL_HDR_TRANS,
+ WTBL_SEC_KEY,
+ WTBL_BA,
+ WTBL_RDG, /* obsoleted */
+ WTBL_PROTECT, /* not used */
+ WTBL_CLEAR, /* not used */
+ WTBL_BF,
+ WTBL_SMPS,
+ WTBL_RAW_DATA, /* debug only */
+ WTBL_PN,
+ WTBL_SPE,
+ WTBL_MAX_NUM
+};
+
+struct sta_rec_basic {
+ __le16 tag;
+ __le16 len;
+ __le32 conn_type;
+ u8 conn_state;
+ u8 qos;
+ __le16 aid;
+ u8 peer_addr[ETH_ALEN];
+#define EXTRA_INFO_VER BIT(0)
+#define EXTRA_INFO_NEW BIT(1)
+ __le16 extra_info;
+} __packed;
+
+struct sta_rec_ht {
+ __le16 tag;
+ __le16 len;
+ __le16 ht_cap;
+ u16 rsv;
+} __packed;
+
+struct sta_rec_vht {
+ __le16 tag;
+ __le16 len;
+ __le32 vht_cap;
+ __le16 vht_rx_mcs_map;
+ __le16 vht_tx_mcs_map;
+} __packed;
+
+struct sta_rec_ba {
+ __le16 tag;
+ __le16 len;
+ u8 tid;
+ u8 ba_type;
+ u8 amsdu;
+ u8 ba_en;
+ __le16 ssn;
+ __le16 winsize;
+} __packed;
+
+#define MT7615_STA_REC_UPDATE_MAX_SIZE (sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_ht) + \
+ sizeof(struct sta_rec_vht))
+
+enum {
+ STA_REC_BASIC,
+ STA_REC_RA,
+ STA_REC_RA_CMM_INFO,
+ STA_REC_RA_UPDATE,
+ STA_REC_BF,
+ STA_REC_AMSDU, /* for CR4 */
+ STA_REC_BA,
+ STA_REC_RED, /* not used */
+ STA_REC_TX_PROC, /* for hdr trans and CSO in CR4 */
+ STA_REC_HT,
+ STA_REC_VHT,
+ STA_REC_APPS,
+ STA_REC_MAX_NUM
+};
+
+enum {
+ CMD_CBW_20MHZ,
+ CMD_CBW_40MHZ,
+ CMD_CBW_80MHZ,
+ CMD_CBW_160MHZ,
+ CMD_CBW_10MHZ,
+ CMD_CBW_5MHZ,
+ CMD_CBW_8080MHZ
+};
+
+enum {
+ CH_SWITCH_NORMAL = 0,
+ CH_SWITCH_SCAN = 3,
+ CH_SWITCH_MCC = 4,
+ CH_SWITCH_DFS = 5,
+ CH_SWITCH_BACKGROUND_SCAN_START = 6,
+ CH_SWITCH_BACKGROUND_SCAN_RUNNING = 7,
+ CH_SWITCH_BACKGROUND_SCAN_STOP = 8,
+ CH_SWITCH_SCAN_BYPASS_DPD = 9
+};
+
+static inline struct sk_buff *
+mt7615_mcu_msg_alloc(const void *data, int len)
+{
+ return mt76_mcu_msg_alloc(data, sizeof(struct mt7615_mcu_txd),
+ len, 0);
+}
+
+#endif
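
[Illustrative sketch, not part of the patch: the header above only declares the command descriptor, so the following shows roughly how a host-to-N9 "set" command header might be filled. The MCU_PQ_ID() port/queue arguments and the length convention are assumptions; the real logic lives in mcu.c, which is not shown in this excerpt.]

static void example_fill_mcu_txd(struct mt7615_mcu_txd *txd, u8 cmd, u8 seq,
				 u16 payload_len)
{
	memset(txd, 0, sizeof(*txd));
	/* length counted after the DMA descriptor words (assumed convention) */
	txd->len = cpu_to_le16(sizeof(*txd) - sizeof(txd->txd) + payload_len);
	txd->pq_id = cpu_to_le16(MCU_PQ_ID(1, 0));	/* assumed port/queue */
	txd->pkt_type = MCU_PKT_ID;
	txd->cid = cmd;				/* e.g. MCU_CMD_FW_START_REQ */
	txd->set_query = MCU_Q_SET;
	txd->seq = seq;
	txd->s2d_index = MCU_S2D_H2N;		/* host to N9 */
}
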
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
new file mode 100644
index 000000000000..895c2904d7eb
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_H
+#define __MT7615_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76.h"
+#include "regs.h"
+
+#define MT7615_MAX_INTERFACES 4
+#define MT7615_WTBL_SIZE 128
+#define MT7615_WTBL_RESERVED (MT7615_WTBL_SIZE - 1)
+#define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \
+ MT7615_MAX_INTERFACES)
+
+#define MT7615_WATCHDOG_TIME 100 /* ms */
+#define MT7615_RATE_RETRY 2
+
+#define MT7615_TX_RING_SIZE 1024
+#define MT7615_TX_MCU_RING_SIZE 128
+#define MT7615_TX_FWDL_RING_SIZE 128
+
+#define MT7615_RX_RING_SIZE 1024
+#define MT7615_RX_MCU_RING_SIZE 512
+
+#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
+#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
+#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
+
+#define MT7615_EEPROM_SIZE 1024
+#define MT7615_TOKEN_SIZE 4096
+
+struct mt7615_vif;
+struct mt7615_sta;
+
+enum mt7615_hw_txq_id {
+ MT7615_TXQ_MAIN,
+ MT7615_TXQ_EXT,
+ MT7615_TXQ_MCU,
+ MT7615_TXQ_FWDL,
+};
+
+struct mt7615_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7615_vif *vif;
+
+ struct ieee80211_tx_rate rates[8];
+ u8 rate_count;
+ u8 n_rates;
+
+ u8 rate_probe;
+};
+
+struct mt7615_vif {
+ u8 idx;
+ u8 omac_idx;
+ u8 band_idx;
+ u8 wmm_idx;
+
+ struct mt7615_sta sta;
+};
+
+struct mt7615_dev {
+ struct mt76_dev mt76; /* must be first */
+ u32 vif_mask;
+ u32 omac_mask;
+
+ spinlock_t token_lock;
+ struct idr token;
+};
+
+enum {
+ HW_BSSID_0 = 0x0,
+ HW_BSSID_1,
+ HW_BSSID_2,
+ HW_BSSID_3,
+ HW_BSSID_MAX,
+ EXT_BSSID_START = 0x10,
+ EXT_BSSID_1,
+ EXT_BSSID_2,
+ EXT_BSSID_3,
+ EXT_BSSID_4,
+ EXT_BSSID_5,
+ EXT_BSSID_6,
+ EXT_BSSID_7,
+ EXT_BSSID_8,
+ EXT_BSSID_9,
+ EXT_BSSID_10,
+ EXT_BSSID_11,
+ EXT_BSSID_12,
+ EXT_BSSID_13,
+ EXT_BSSID_14,
+ EXT_BSSID_15,
+ EXT_BSSID_END
+};
+
+extern const struct ieee80211_ops mt7615_ops;
+extern struct pci_driver mt7615_pci_driver;
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+
+int mt7615_register_device(struct mt7615_dev *dev);
+void mt7615_unregister_device(struct mt7615_dev *dev);
+int mt7615_eeprom_init(struct mt7615_dev *dev);
+int mt7615_dma_init(struct mt7615_dev *dev);
+void mt7615_dma_cleanup(struct mt7615_dev *dev);
+int mt7615_mcu_init(struct mt7615_dev *dev);
+int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en);
+int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en);
+int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd);
+void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
+ struct ieee80211_tx_rate *probe_rate,
+ struct ieee80211_tx_rate *rates);
+int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
+int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
+int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
+int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
+ struct ieee80211_vif *vif, bool en);
+int mt7615_mcu_set_sta_rec(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool en);
+int mt7615_mcu_set_bcn(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ int en);
+int mt7615_mcu_set_channel(struct mt7615_dev *dev);
+int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue,
+ const struct ieee80211_tx_queue_params *params);
+int mt7615_mcu_set_tx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7615_mcu_set_rx_ba(struct mt7615_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7615_mcu_set_ht_cap(struct mt7615_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+static inline void mt7615_irq_enable(struct mt7615_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
+}
+
+static inline void mt7615_irq_disable(struct mt7615_dev *dev, u32 mask)
+{
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+u16 mt7615_mac_tx_rate_val(struct mt7615_dev *dev,
+ const struct ieee80211_tx_rate *rate,
+ bool stbc, u8 *bw);
+int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta, int pid,
+ struct ieee80211_key_conf *key);
+int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb);
+void mt7615_mac_add_txs(struct mt7615_dev *dev, void *data);
+void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb);
+
+int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
+int mt7615_mcu_init_mac(struct mt7615_dev *dev);
+int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
+int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
+void mt7615_mcu_exit(struct mt7615_dev *dev);
+
+int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
+
+void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
+void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
+int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7615_mac_work(struct work_struct *work);
+void mt7615_txp_skb_unmap(struct mt76_dev *dev,
+ struct mt76_txwi_cache *txwi);
+
+#endif
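
[A hedged sketch, not part of the patch: how the token idr and token_lock declared in struct mt7615_dev above might hand out DMA tokens up to MT7615_TOKEN_SIZE. The function name is hypothetical and the real allocation sits in the TX path, which is not shown in this excerpt.]

static int example_token_get(struct mt7615_dev *dev, void *ptr)
{
	int id;

	spin_lock_bh(&dev->token_lock);
	id = idr_alloc(&dev->token, ptr, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC);
	spin_unlock_bh(&dev->token_lock);

	return id;	/* negative errno-style value on failure */
}
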
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
new file mode 100644
index 000000000000..11122bd2d727
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2019 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ * Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7615.h"
+#include "mac.h"
+
+static const struct pci_device_id mt7615_pci_device_table[] = {
+ { PCI_DEVICE(0x14c3, 0x7615) },
+ { },
+};
+
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ u32 offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+
+void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
+{
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
+}
+
+irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7615_dev *dev = dev_instance;
+ u32 intr;
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state))
+ return IRQ_NONE;
+
+ intr &= dev->mt76.mmio.irqmask;
+
+ if (intr & MT_INT_TX_DONE_ALL) {
+ mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+ }
+
+ if (intr & MT_INT_RX_DONE(0)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(0));
+ napi_schedule(&dev->mt76.napi[0]);
+ }
+
+ if (intr & MT_INT_RX_DONE(1)) {
+ mt7615_irq_disable(dev, MT_INT_RX_DONE(1));
+ napi_schedule(&dev->mt76.napi[1]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mt7615_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp),
+ .txwi_flags = MT_TXWI_NO_FREE,
+ .tx_prepare_skb = mt7615_tx_prepare_skb,
+ .tx_complete_skb = mt7615_tx_complete_skb,
+ .rx_skb = mt7615_queue_rx_skb,
+ .rx_poll_complete = mt7615_rx_poll_complete,
+ .sta_ps = mt7615_sta_ps,
+ .sta_add = mt7615_sta_add,
+ .sta_assoc = mt7615_sta_assoc,
+ .sta_remove = mt7615_sta_remove,
+ };
+ struct mt7615_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7615_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7615_dev, mt76);
+ mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ ret = devm_request_irq(mdev->dev, pdev->irq, mt7615_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto error;
+
+ ret = mt7615_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+error:
+ ieee80211_free_hw(mt76_hw(dev));
+ return ret;
+}
+
+static void mt7615_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+
+ mt7615_unregister_device(dev);
+}
+
+struct pci_driver mt7615_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7615_pci_device_table,
+ .probe = mt7615_pci_probe,
+ .remove = mt7615_pci_remove,
+};
+
+module_pci_driver(mt7615_pci_driver);
+
+MODULE_DEVICE_TABLE(pci, mt7615_pci_device_table);
+MODULE_FIRMWARE(MT7615_FIRMWARE_CR4);
+MODULE_FIRMWARE(MT7615_FIRMWARE_N9);
+MODULE_FIRMWARE(MT7615_ROM_PATCH);
+MODULE_LICENSE("Dual BSD/GPL");
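
[For reference, a worked example of the remap done by mt7615_reg_map() above, using the MT_MCU_PCIE_REMAP_2_* fields defined in regs.h below; the address is chosen only as an illustration.]

/*
 * addr   = 0x81070000 (MT_EFUSE_BASE)
 * base   = addr & MT_MCU_PCIE_REMAP_2_BASE   = addr & GENMASK(31, 19) = 0x81000000
 * offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET = addr & GENMASK(18, 0)  = 0x00070000
 * return MT_PCIE_REMAP_BASE_2 + offset       = 0x80000 + 0x70000      = 0xf0000
 *
 * i.e. writing the base to MT_MCU_PCIE_REMAP_2 makes the 512 KiB region
 * starting at 0x81000000 accessible through the window at 0x80000.
 */
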
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
new file mode 100644
index 000000000000..70e5ace33cc3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2019 MediaTek Inc. */
+
+#ifndef __MT7615_REGS_H
+#define __MT7615_REGS_H
+
+#define MT_HW_REV 0x1000
+#define MT_HW_CHIPID 0x1008
+#define MT_TOP_MISC2 0x1134
+#define MT_TOP_MISC2_FW_STATE GENMASK(2, 0)
+
+#define MT_MCU_BASE 0x2000
+#define MT_MCU(ofs) (MT_MCU_BASE + (ofs))
+
+#define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500)
+#define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0)
+#define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18)
+#define MT_PCIE_REMAP_BASE_1 0x40000
+
+#define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504)
+#define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0)
+#define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19)
+#define MT_PCIE_REMAP_BASE_2 0x80000
+
+#define MT_HIF_BASE 0x4000
+#define MT_HIF(ofs) (MT_HIF_BASE + (ofs))
+
+#define MT_CFG_LPCR_HOST MT_HIF(0x1f0)
+#define MT_CFG_LPCR_HOST_FW_OWN BIT(0)
+#define MT_CFG_LPCR_HOST_DRV_OWN BIT(1)
+
+#define MT_INT_SOURCE_CSR MT_HIF(0x200)
+#define MT_INT_MASK_CSR MT_HIF(0x204)
+#define MT_DELAY_INT_CFG MT_HIF(0x210)
+
+#define MT_INT_RX_DONE(_n) BIT(_n)
+#define MT_INT_RX_DONE_ALL GENMASK(1, 0)
+#define MT_INT_TX_DONE_ALL GENMASK(7, 4)
+#define MT_INT_TX_DONE(_n) BIT((_n) + 4)
+
+#define MT_WPDMA_GLO_CFG MT_HIF(0x208)
+#define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4)
+#define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6)
+#define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT0 BIT(9)
+#define MT_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MT_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MT_WPDMA_GLO_CFG_TX_BT_SIZE_BIT21 GENMASK(23, 22)
+#define MT_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MT_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MT_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+
+#define MT_WPDMA_RST_IDX MT_HIF(0x20c)
+
+#define MT_TX_RING_BASE MT_HIF(0x300)
+#define MT_RX_RING_BASE MT_HIF(0x400)
+
+#define MT_WPDMA_GLO_CFG1 MT_HIF(0x500)
+#define MT_WPDMA_TX_PRE_CFG MT_HIF(0x510)
+#define MT_WPDMA_RX_PRE_CFG MT_HIF(0x520)
+#define MT_WPDMA_ABT_CFG MT_HIF(0x530)
+#define MT_WPDMA_ABT_CFG1 MT_HIF(0x534)
+
+#define MT_WF_PHY_BASE 0x10000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_WF2_RFCTRL0 MT_WF_PHY(0x1900)
+#define MT_WF_PHY_WF2_RFCTRL0_LPBCN_EN BIT(9)
+
+#define MT_WF_CFG_BASE 0x20200
+#define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs))
+
+#define MT_CFG_CCR MT_WF_CFG(0x000)
+#define MT_CFG_CCR_MAC_D1_1X_GC_EN BIT(24)
+#define MT_CFG_CCR_MAC_D0_1X_GC_EN BIT(25)
+#define MT_CFG_CCR_MAC_D1_2X_GC_EN BIT(30)
+#define MT_CFG_CCR_MAC_D0_2X_GC_EN BIT(31)
+
+#define MT_WF_AGG_BASE 0x20a00
+#define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs))
+
+#define MT_AGG_ARCR MT_WF_AGG(0x010)
+#define MT_AGG_ARCR_INIT_RATE1 BIT(0)
+#define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16)
+#define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19)
+#define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20)
+
+#define MT_AGG_ARUCR MT_WF_AGG(0x018)
+#define MT_AGG_ARDCR MT_WF_AGG(0x01c)
+#define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n))
+#define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n), \
+ MT_AGG_ARxCR_LIMIT_SHIFT(_n))
+
+#define MT_AGG_SCR MT_WF_AGG(0x0fc)
+#define MT_AGG_SCR_NLNAV_MID_PTEC_DIS BIT(3)
+
+#define MT_WF_TMAC_BASE 0x21000
+#define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs))
+
+#define MT_TMAC_CTCR0 MT_WF_TMAC(0x0f4)
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_DENSITY GENMASK(15, 12)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+#define MT_WF_RMAC_BASE 0x21200
+#define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs))
+
+#define MT_WF_RFCR MT_WF_RMAC(0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_DMA_BASE 0x21800
+#define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs))
+
+#define MT_DMA_DCR0 MT_WF_DMA(0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 2)
+#define MT_DMA_DCR0_RX_VEC_DROP BIT(17)
+
+#define MT_WTBL_BASE 0x30000
+#define MT_WTBL_ENTRY_SIZE 256
+
+#define MT_WTBL_OFF_BASE 0x23400
+#define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n))
+
+#define MT_WTBL_UPDATE MT_WTBL_OFF(0x030)
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0)
+#define MT_WTBL_UPDATE_RATE_UPDATE BIT(13)
+#define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+#define MT_WTBL_ON_BASE 0x23000
+#define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n))
+
+#define MT_WTBL_RIUCR0 MT_WTBL_ON(0x020)
+
+#define MT_WTBL_RIUCR1 MT_WTBL_ON(0x024)
+#define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0)
+#define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12)
+#define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24)
+
+#define MT_WTBL_RIUCR2 MT_WTBL_ON(0x028)
+#define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0)
+#define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4)
+#define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16)
+#define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28)
+
+#define MT_WTBL_RIUCR3 MT_WTBL_ON(0x02c)
+#define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0)
+#define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8)
+#define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20)
+
+#define MT_WTBL_W5_CHANGE_BW_RATE GENMASK(7, 5)
+#define MT_WTBL_W5_SHORT_GI_20 BIT(8)
+#define MT_WTBL_W5_SHORT_GI_40 BIT(9)
+#define MT_WTBL_W5_SHORT_GI_80 BIT(10)
+#define MT_WTBL_W5_SHORT_GI_160 BIT(11)
+#define MT_WTBL_W5_BW_CAP GENMASK(13, 12)
+#define MT_WTBL_W27_CC_BW_SEL GENMASK(6, 5)
+
+#define MT_EFUSE_BASE 0x81070000
+#define MT_EFUSE_BASE_CTRL 0x000
+#define MT_EFUSE_BASE_CTRL_EMPTY BIT(30)
+
+#define MT_EFUSE_CTRL 0x008
+#define MT_EFUSE_CTRL_AOUT GENMASK(5, 0)
+#define MT_EFUSE_CTRL_MODE GENMASK(7, 6)
+#define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8)
+#define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14)
+#define MT_EFUSE_CTRL_AIN GENMASK(25, 16)
+#define MT_EFUSE_CTRL_VALID BIT(29)
+#define MT_EFUSE_CTRL_KICK BIT(30)
+#define MT_EFUSE_CTRL_SEL BIT(31)
+
+#define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4))
+#define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4))
+
+#endif
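
[A minimal sketch, not part of the patch: the GENMASK() fields above are normally consumed with FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>. The helper below reads the firmware state field; which FW_STATE_* value (from mcu.h earlier in this diff) the driver actually waits for is not claimed here.]

static bool example_fw_reached_state(struct mt7615_dev *dev, u8 state)
{
	u32 val = mt76_rr(dev, MT_TOP_MISC2);

	return FIELD_GET(MT_TOP_MISC2_FW_STATE, val) == state;
}

/* e.g. example_fw_reached_state(dev, FW_STATE_NORMAL_TRX) */
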
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index bcb72e019fd2..57e46d57b449 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -259,7 +259,6 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev)
return ret;
mt76x0_phy_init(dev);
- mt76x02_init_beacon_config(dev);
return 0;
}
@@ -281,6 +280,7 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
mt76x0_get_power_info(dev, chan, &tp);
chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
+ chan->orig_mpwr = chan->max_power;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index fee16ab21edb..691984037f98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -22,10 +22,9 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
int ret;
cancel_delayed_work_sync(&dev->cal_work);
- if (mt76_is_mmio(dev)) {
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+ if (mt76_is_mmio(dev))
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- }
mt76_set_channel(&dev->mt76);
ret = mt76x0_phy_set_channel(dev, chandef);
@@ -38,9 +37,10 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
if (mt76_is_mmio(dev)) {
mt76x02_dfs_init_params(dev);
- tasklet_enable(&dev->pre_tbtt_tasklet);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
}
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
+
mt76_txq_schedule_all(&dev->mt76);
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f302162036d0..4585e1b756c2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -25,25 +25,21 @@ static int mt76x0e_start(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
-
mt76x02_mac_start(dev);
mt76x0_phy_calibrate(dev, true);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
- mutex_unlock(&dev->mt76.mutex);
-
return 0;
}
static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY,
0, 1000))
@@ -62,10 +58,8 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x0e_stop_hw(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static void
@@ -74,13 +68,6 @@ mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
}
-static int
-mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
- bool set)
-{
- return 0;
-}
-
static const struct ieee80211_ops mt76x0e_ops = {
.tx = mt76x02_tx,
.start = mt76x0e_start,
@@ -101,7 +88,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.get_survey = mt76_get_survey,
.get_txpower = mt76_get_txpower,
.flush = mt76x0e_flush,
- .set_tim = mt76x0e_set_tim,
+ .set_tim = mt76_set_tim,
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x02_set_coverage_class,
.set_rts_threshold = mt76x02_set_rts_threshold,
@@ -128,6 +115,8 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ mt76x02e_init_beacon_config(dev);
+
if (mt76_chip(&dev->mt76) == 0x7610) {
u16 val;
@@ -164,6 +153,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_aligned4_skbs = true,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
@@ -223,7 +213,7 @@ error:
static void mt76x0e_cleanup(struct mt76x02_dev *dev)
{
clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
mt76x02_dma_cleanup(dev);
@@ -238,7 +228,7 @@ mt76x0e_remove(struct pci_dev *pdev)
mt76_unregister_device(mdev);
mt76x0e_cleanup(dev);
- ieee80211_free_hw(mdev->hw);
+ mt76_free_device(mdev);
}
static const struct pci_device_id mt76x0e_device_table[] = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index e5a06f74a6f7..7c38ec4418db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -81,20 +81,19 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
mt76u_queues_deinit(&dev->mt76);
}
-static void mt76x0u_mac_stop(struct mt76x02_dev *dev)
+static void mt76x0u_stop(struct ieee80211_hw *hw)
{
+ struct mt76x02_dev *dev = hw->priv;
+
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
- mt76u_stop_stat_wk(&dev->mt76);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
+ mt76u_stop_tx(&dev->mt76);
+ mt76x02u_exit_beacon_config(dev);
if (test_bit(MT76_REMOVED, &dev->mt76.state))
return;
- mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX);
-
if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000))
dev_warn(dev->mt76.dev, "TX DMA did not stop\n");
@@ -109,31 +108,17 @@ static int mt76x0u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x0_mac_start(dev);
if (ret)
- goto out;
+ return ret;
mt76x0_phy_calibrate(dev, true);
- ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work,
+ ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work,
MT_CALIBRATE_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
-}
-
-static void mt76x0u_stop(struct ieee80211_hw *hw)
-{
- struct mt76x02_dev *dev = hw->priv;
-
- mutex_lock(&dev->mt76.mutex);
- mt76x0u_mac_stop(dev);
- mutex_unlock(&dev->mt76.mutex);
+ return 0;
}
static const struct ieee80211_ops mt76x0u_ops = {
@@ -155,6 +140,8 @@ static const struct ieee80211_ops mt76x0u_ops = {
.set_rts_threshold = mt76x02_set_rts_threshold,
.wake_tx_queue = mt76_wake_tx_queue,
.get_txpower = mt76_get_txpower,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
};
static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
@@ -175,6 +162,8 @@ static int mt76x0u_init_hardware(struct mt76x02_dev *dev)
if (err < 0)
return err;
+ mt76x02u_init_beacon_config(dev);
+
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG,
FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) |
@@ -223,6 +212,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+ .sta_ps = mt76x02_sta_ps,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
@@ -232,7 +222,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
u32 mac_rev;
int ret;
- mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
+ mdev = mt76_alloc_device(&usb_dev->dev, sizeof(*dev), &mt76x0u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -311,8 +301,7 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- mt76u_stop_queues(&dev->mt76);
- mt76x0u_mac_stop(dev);
+ mt76u_stop_rx(&dev->mt76);
clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state);
mt76x0_chip_onoff(dev, false, false);
@@ -322,16 +311,12 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(usb_intf);
- struct mt76_usb *usb = &dev->mt76.usb;
int ret;
- ret = mt76u_submit_rx_buffers(&dev->mt76);
+ ret = mt76u_resume_rx(&dev->mt76);
if (ret < 0)
goto err;
- tasklet_enable(&usb->rx_tasklet);
- tasklet_enable(&usb->tx_tasklet);
-
ret = mt76x0u_init_hardware(dev);
if (ret)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 07061eb4d1e1..687bd14b2d77 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -68,6 +68,13 @@ struct mt76x02_calibration {
s8 tssi_dc;
};
+struct mt76x02_beacon_ops {
+ unsigned int nslots;
+ unsigned int slot_size;
+ void (*pre_tbtt_enable) (struct mt76x02_dev *, bool);
+ void (*beacon_enable) (struct mt76x02_dev *, bool);
+};
+
struct mt76x02_dev {
struct mt76_dev mt76; /* must be first */
@@ -79,23 +86,25 @@ struct mt76x02_dev {
u8 txdone_seq;
DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
+ spinlock_t txstatus_fifo_lock;
struct sk_buff *rx_head;
- struct tasklet_struct tx_tasklet;
- struct tasklet_struct pre_tbtt_tasklet;
+ struct napi_struct tx_napi;
struct delayed_work cal_work;
- struct delayed_work mac_work;
struct delayed_work wdt_work;
+ struct hrtimer pre_tbtt_timer;
+ struct work_struct pre_tbtt_work;
+
+ const struct mt76x02_beacon_ops *beacon_ops;
+
u32 aggr_stats[32];
struct sk_buff *beacons[8];
- u8 beacon_mask;
u8 beacon_data_mask;
u8 tbtt_count;
- u16 beacon_int;
u32 tx_hang_reset;
u8 tx_hang_check;
@@ -163,7 +172,6 @@ void mt76x02_set_tx_ackto(struct mt76x02_dev *dev);
void mt76x02_set_coverage_class(struct ieee80211_hw *hw,
s16 coverage_class);
int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val);
-int mt76x02_insert_hdr_pad(struct sk_buff *skb);
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len);
bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -173,9 +181,9 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance);
void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const u8 *mac);
void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
@@ -185,9 +193,19 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info, u32 changed);
-extern const u16 mt76x02_beacon_offsets[16];
+struct beacon_bc_data {
+ struct mt76x02_dev *dev;
+ struct sk_buff_head q;
+ struct sk_buff *tail[8];
+};
void mt76x02_init_beacon_config(struct mt76x02_dev *dev);
-void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set);
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02_resync_beacon_timer(struct mt76x02_dev *dev);
+void mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
+void mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev,
+ struct beacon_bc_data *data,
+ int max_nframes);
+
void mt76x02_mac_start(struct mt76x02_dev *dev);
void mt76x02_init_debugfs(struct mt76x02_dev *dev);
@@ -208,12 +226,12 @@ static inline bool is_mt76x2(struct mt76x02_dev *dev)
static inline void mt76x02_irq_enable(struct mt76x02_dev *dev, u32 mask)
{
- mt76x02_set_irq_mask(dev, 0, mask);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, 0, mask);
}
static inline void mt76x02_irq_disable(struct mt76x02_dev *dev, u32 mask)
{
- mt76x02_set_irq_mask(dev, mask, 0);
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
static inline bool
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
new file mode 100644
index 000000000000..e196b9c0a686
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mt76x02.h"
+
+static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
+{
+ u32 regs[4] = {};
+ u16 val;
+ int i;
+
+ for (i = 0; i < dev->beacon_ops->nslots; i++) {
+ val = i * dev->beacon_ops->slot_size;
+ regs[i / 4] |= (val / 64) << (8 * (i % 4));
+ }
+
+ for (i = 0; i < 4; i++)
+ mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
+}
+
+static int
+mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_ops->slot_size;
+ struct mt76x02_txwi txwi;
+
+ if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
+ return -ENOSPC;
+
+ mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
+
+ mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
+ offset += sizeof(txwi);
+
+ mt76_wr_copy(dev, offset, skb->data, skb->len);
+ return 0;
+}
+
+static int
+__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
+ struct sk_buff *skb)
+{
+ int beacon_len = dev->beacon_ops->slot_size;
+ int beacon_addr = MT_BEACON_BASE + (beacon_len * bcn_idx);
+ int ret = 0;
+ int i;
+
+ /* Prevent corrupt transmissions during update */
+ mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
+
+ if (skb) {
+ ret = mt76x02_write_beacon(dev, beacon_addr, skb);
+ if (!ret)
+ dev->beacon_data_mask |= BIT(bcn_idx);
+ } else {
+ dev->beacon_data_mask &= ~BIT(bcn_idx);
+ for (i = 0; i < beacon_len; i += 4)
+ mt76_wr(dev, beacon_addr + i, 0);
+ }
+
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
+
+ return ret;
+}
+
+int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
+ struct sk_buff *skb)
+{
+ bool force_update = false;
+ int bcn_idx = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (vif_idx == i) {
+ force_update = !!dev->beacons[i] ^ !!skb;
+
+ if (dev->beacons[i])
+ dev_kfree_skb(dev->beacons[i]);
+
+ dev->beacons[i] = skb;
+ __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
+ } else if (force_update && dev->beacons[i]) {
+ __mt76x02_mac_set_beacon(dev, bcn_idx,
+ dev->beacons[i]);
+ }
+
+ bcn_idx += !!dev->beacons[i];
+ }
+
+ for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
+ if (!(dev->beacon_data_mask & BIT(i)))
+ break;
+
+ __mt76x02_mac_set_beacon(dev, i, NULL);
+ }
+
+ mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
+ bcn_idx - 1);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76x02_mac_set_beacon);
+
+static void
+__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
+ bool val, struct sk_buff *skb)
+{
+ u8 old_mask = dev->mt76.beacon_mask;
+ bool en;
+ u32 reg;
+
+ if (val) {
+ dev->mt76.beacon_mask |= BIT(vif_idx);
+ if (skb)
+ mt76x02_mac_set_beacon(dev, vif_idx, skb);
+ } else {
+ dev->mt76.beacon_mask &= ~BIT(vif_idx);
+ mt76x02_mac_set_beacon(dev, vif_idx, NULL);
+ }
+
+ if (!!old_mask == !!dev->mt76.beacon_mask)
+ return;
+
+ en = dev->mt76.beacon_mask;
+
+ reg = MT_BEACON_TIME_CFG_BEACON_TX |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_TIMER_EN;
+ mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
+
+ dev->beacon_ops->beacon_enable(dev, en);
+}
+
+void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
+ struct ieee80211_vif *vif, bool val)
+{
+ u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
+ struct sk_buff *skb = NULL;
+
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+
+ if (mt76_is_usb(dev))
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+
+ if (!dev->mt76.beacon_mask)
+ dev->tbtt_count = 0;
+
+ __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
+
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
+}
+
+void
+mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
+{
+ u32 timer_val = dev->mt76.beacon_int << 4;
+
+ dev->tbtt_count++;
+
+ /*
+ * Beacon timer drifts by 1us every tick; the timer is configured
+ * in 1/16 TU (64us) units.
+ */
+ if (dev->tbtt_count < 63)
+ return;
+
+ /*
+ * The updated beacon interval takes effect after two TBTT, because
+ * at this point the original interval has already been loaded into
+ * the next TBTT_TIMER value
+ */
+ if (dev->tbtt_count == 63)
+ timer_val -= 1;
+
+ mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_INTVAL, timer_val);
+
+ if (dev->tbtt_count >= 64) {
+ dev->tbtt_count = 0;
+ return;
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);
+
+void
+mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct sk_buff *skb = NULL;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_beacon_get(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ mt76x02_mac_set_beacon(dev, mvif->idx, skb);
+}
+EXPORT_SYMBOL_GPL(mt76x02_update_beacon_iter);
+
+static void
+mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct beacon_bc_data *data = priv;
+ struct mt76x02_dev *dev = data->dev;
+ struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+
+ if (!(dev->mt76.beacon_mask & BIT(mvif->idx)))
+ return;
+
+ skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
+ mt76_skb_set_moredata(skb, true);
+ __skb_queue_tail(&data->q, skb);
+ data->tail[mvif->idx] = skb;
+}
+
+void
+mt76x02_enqueue_buffered_bc(struct mt76x02_dev *dev, struct beacon_bc_data *data,
+ int max_nframes)
+{
+ int i, nframes;
+
+ data->dev = dev;
+ __skb_queue_head_init(&data->q);
+
+ do {
+ nframes = skb_queue_len(&data->q);
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_add_buffered_bc, data);
+ } while (nframes != skb_queue_len(&data->q) &&
+ skb_queue_len(&data->q) < max_nframes);
+
+ if (!skb_queue_len(&data->q))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(data->tail); i++) {
+ if (!data->tail[i])
+ continue;
+ mt76_skb_set_moredata(data->tail[i], false);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76x02_enqueue_buffered_bc);
+
+void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
+{
+ int i;
+
+ mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX));
+ mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
+ mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
+
+ for (i = 0; i < 8; i++)
+ mt76x02_mac_set_beacon(dev, i, NULL);
+
+ mt76x02_set_beacon_offsets(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
+
+
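[Two worked examples, editorial and not part of the patch, for the helpers above. mt76x02_set_beacon_offsets() stores each slot offset in 64-byte units, one byte per slot, four slots per register; the values below assume the PCIe parameters set later in this diff (nslots = 8, slot_size = 1024).]

/*
 * slot i offset = i * 1024, stored byte = i * 1024 / 64 = i * 16
 *   MT_BCN_OFFSET(0) = 0x30201000   (slots 0-3)
 *   MT_BCN_OFFSET(1) = 0x70605040   (slots 4-7)
 *   MT_BCN_OFFSET(2) = MT_BCN_OFFSET(3) = 0
 *
 * mt76x02_resync_beacon_timer(): per the comment in the function, the beacon
 * timer gains about 1us per tick, so once every 64 TBTTs the interval
 * register (programmed in 1/16 TU = 64us units) is shortened by one unit for
 * a single beacon period and then restored, cancelling the accumulated
 * ~64us of drift.
 */
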
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 4fe5a83ca5a4..56510a1a843a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -218,10 +218,17 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
{
- spin_lock_bh(&dev->mt76.lock);
- wcid->tx_rate = mt76x02_mac_tx_rate_val(dev, rate, &wcid->tx_rate_nss);
- wcid->tx_rate_set = true;
- spin_unlock_bh(&dev->mt76.lock);
+ s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
+ __le16 rateval;
+ u32 tx_info;
+ s8 nss;
+
+ rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ tx_info = FIELD_PREP(MT_WCID_TX_INFO_RATE, rateval) |
+ FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
+ FIELD_PREP(MT_WCID_TX_INFO_TXPWR_ADJ, max_txpwr_adj) |
+ MT_WCID_TX_INFO_SET;
+ wcid->tx_info = tx_info;
}
void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable)
@@ -323,6 +330,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_key_conf *key = info->control.hw_key;
+ u32 wcid_tx_info;
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
u16 txwi_flags = 0;
u8 nss;
@@ -357,16 +365,16 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
txwi->eiv = *((__le32 *)&ccmp_pn[4]);
}
- spin_lock_bh(&dev->mt76.lock);
if (wcid && (rate->idx < 0 || !rate->count)) {
- txwi->rate = wcid->tx_rate;
- max_txpwr_adj = wcid->max_txpwr_adj;
- nss = wcid->tx_rate_nss;
+ wcid_tx_info = wcid->tx_info;
+ txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
+ max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
+ wcid_tx_info);
+ nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
} else {
txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
- spin_unlock_bh(&dev->mt76.lock);
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->mt76.txpower_conf,
max_txpwr_adj);
@@ -731,7 +739,6 @@ int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb,
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
{
struct mt76x02_tx_status stat = {};
- unsigned long flags;
u8 update = 1;
bool ret;
@@ -741,9 +748,11 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
trace_mac_txstat_poll(dev);
while (!irq || !kfifo_is_full(&dev->txstatus_fifo)) {
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
+ if (!spin_trylock(&dev->txstatus_fifo_lock))
+ break;
+
ret = mt76x02_mac_load_tx_status(dev, &stat);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
+ spin_unlock(&dev->txstatus_fifo_lock);
if (!ret)
break;
@@ -757,11 +766,12 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
}
}
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
+ u8 *txwi_ptr;
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
@@ -770,7 +780,8 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
mt76x02_mac_poll_tx_status(dev, false);
- txwi = (struct mt76x02_txwi *) &e->txwi->txwi;
+ txwi_ptr = mt76_get_txwi_ptr(mdev, e->txwi);
+ txwi = (struct mt76x02_txwi *)txwi_ptr;
trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid);
mt76_tx_complete_skb(mdev, e->skb);
@@ -1021,7 +1032,7 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
void mt76x02_mac_work(struct work_struct *work)
{
struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
- mac_work.work);
+ mt76.mac_work.work);
int i, idx;
mutex_lock(&dev->mt76.mutex);
@@ -1034,7 +1045,7 @@ void mt76x02_mac_work(struct work_struct *work)
dev->aggr_stats[idx++] += val >> 16;
}
- if (!dev->beacon_mask)
+ if (!dev->mt76.beacon_mask)
mt76x02_check_mac_err(dev);
if (dev->ed_monitor)
@@ -1044,7 +1055,7 @@ void mt76x02_mac_work(struct work_struct *work)
mt76_tx_status_check(&dev->mt76, NULL, false);
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
}
@@ -1055,141 +1066,3 @@ void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr)
mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
get_unaligned_le16(addr + 4));
}
-
-static int
-mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb)
-{
- int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
- struct mt76x02_txwi txwi;
-
- if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi)))
- return -ENOSPC;
-
- mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len);
-
- mt76_wr_copy(dev, offset, &txwi, sizeof(txwi));
- offset += sizeof(txwi);
-
- mt76_wr_copy(dev, offset, skb->data, skb->len);
- return 0;
-}
-
-static int
-__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx,
- struct sk_buff *skb)
-{
- int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0];
- int beacon_addr = mt76x02_beacon_offsets[bcn_idx];
- int ret = 0;
- int i;
-
- /* Prevent corrupt transmissions during update */
- mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx));
-
- if (skb) {
- ret = mt76x02_write_beacon(dev, beacon_addr, skb);
- if (!ret)
- dev->beacon_data_mask |= BIT(bcn_idx);
- } else {
- dev->beacon_data_mask &= ~BIT(bcn_idx);
- for (i = 0; i < beacon_len; i += 4)
- mt76_wr(dev, beacon_addr + i, 0);
- }
-
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask);
-
- return ret;
-}
-
-int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
- struct sk_buff *skb)
-{
- bool force_update = false;
- int bcn_idx = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) {
- if (vif_idx == i) {
- force_update = !!dev->beacons[i] ^ !!skb;
-
- if (dev->beacons[i])
- dev_kfree_skb(dev->beacons[i]);
-
- dev->beacons[i] = skb;
- __mt76x02_mac_set_beacon(dev, bcn_idx, skb);
- } else if (force_update && dev->beacons[i]) {
- __mt76x02_mac_set_beacon(dev, bcn_idx,
- dev->beacons[i]);
- }
-
- bcn_idx += !!dev->beacons[i];
- }
-
- for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) {
- if (!(dev->beacon_data_mask & BIT(i)))
- break;
-
- __mt76x02_mac_set_beacon(dev, i, NULL);
- }
-
- mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N,
- bcn_idx - 1);
- return 0;
-}
-
-static void
-__mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx,
- bool val, struct sk_buff *skb)
-{
- u8 old_mask = dev->beacon_mask;
- bool en;
- u32 reg;
-
- if (val) {
- dev->beacon_mask |= BIT(vif_idx);
- if (skb)
- mt76x02_mac_set_beacon(dev, vif_idx, skb);
- } else {
- dev->beacon_mask &= ~BIT(vif_idx);
- mt76x02_mac_set_beacon(dev, vif_idx, NULL);
- }
-
- if (!!old_mask == !!dev->beacon_mask)
- return;
-
- en = dev->beacon_mask;
-
- reg = MT_BEACON_TIME_CFG_BEACON_TX |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_TIMER_EN;
- mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en);
-
- if (mt76_is_usb(dev))
- return;
-
- mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
- if (en)
- mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
- else
- mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
-}
-
-void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
- struct ieee80211_vif *vif, bool val)
-{
- u8 vif_idx = ((struct mt76x02_vif *)vif->drv_priv)->idx;
- struct sk_buff *skb = NULL;
-
- if (mt76_is_mmio(dev))
- tasklet_disable(&dev->pre_tbtt_tasklet);
- else if (val)
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
-
- if (!dev->beacon_mask)
- dev->tbtt_count = 0;
-
- __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb);
-
- if (mt76_is_mmio(dev))
- tasklet_enable(&dev->pre_tbtt_tasklet);
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index caeeef96c42f..e4a9e0d0924b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -198,8 +198,8 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index daaed1220147..7b7163bc3b62 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -22,96 +22,18 @@
#include "mt76x02_mcu.h"
#include "mt76x02_trace.h"
-struct beacon_bc_data {
- struct mt76x02_dev *dev;
- struct sk_buff_head q;
- struct sk_buff *tail[8];
-};
-
-static void
-mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct mt76x02_dev *dev = (struct mt76x02_dev *)priv;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- struct sk_buff *skb = NULL;
-
- if (!(dev->beacon_mask & BIT(mvif->idx)))
- return;
-
- skb = ieee80211_beacon_get(mt76_hw(dev), vif);
- if (!skb)
- return;
-
- mt76x02_mac_set_beacon(dev, mvif->idx, skb);
-}
-
-static void
-mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif)
-{
- struct beacon_bc_data *data = priv;
- struct mt76x02_dev *dev = data->dev;
- struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- struct ieee80211_tx_info *info;
- struct sk_buff *skb;
-
- if (!(dev->beacon_mask & BIT(mvif->idx)))
- return;
-
- skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif);
- if (!skb)
- return;
-
- info = IEEE80211_SKB_CB(skb);
- info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
- mt76_skb_set_moredata(skb, true);
- __skb_queue_tail(&data->q, skb);
- data->tail[mvif->idx] = skb;
-}
-
-static void
-mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
-{
- u32 timer_val = dev->beacon_int << 4;
-
- dev->tbtt_count++;
-
- /*
- * Beacon timer drifts by 1us every tick, the timer is configured
- * in 1/16 TU (64us) units.
- */
- if (dev->tbtt_count < 63)
- return;
-
- /*
- * The updated beacon interval takes effect after two TBTT, because
- * at this point the original interval has already been loaded into
- * the next TBTT_TIMER value
- */
- if (dev->tbtt_count == 63)
- timer_val -= 1;
-
- mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
- MT_BEACON_TIME_CFG_INTVAL, timer_val);
-
- if (dev->tbtt_count >= 64) {
- dev->tbtt_count = 0;
- return;
- }
-}
-
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
- struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD];
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
struct beacon_bc_data data = {};
struct sk_buff *skb;
- int i, nframes;
+ int i;
- mt76x02_resync_beacon_timer(dev);
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
- data.dev = dev;
- __skb_queue_head_init(&data.q);
+ mt76x02_resync_beacon_timer(dev);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -122,13 +44,7 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
if (dev->mt76.csa_complete)
return;
- do {
- nframes = skb_queue_len(&data.q);
- ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
- IEEE80211_IFACE_ITER_RESUME_ALL,
- mt76x02_add_buffered_bc, &data);
- } while (nframes != skb_queue_len(&data.q) &&
- skb_queue_len(&data.q) < 8);
+ mt76x02_enqueue_buffered_bc(dev, &data, 8);
if (!skb_queue_len(&data.q))
return;
@@ -146,25 +62,67 @@ static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
struct ieee80211_vif *vif = info->control.vif;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid,
- NULL);
+ mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
+ NULL);
}
spin_unlock_bh(&q->lock);
}
+static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (en)
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+ else
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+}
+
+static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+ mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
+ if (en)
+ mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+ else
+ mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
+}
+
+void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const struct mt76x02_beacon_ops beacon_ops = {
+ .nslots = 8,
+ .slot_size = 1024,
+ .pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
+ .beacon_enable = mt76x02e_beacon_enable,
+ };
+
+ dev->beacon_ops = &beacon_ops;
+
+ /* Fire a pre-TBTT interrupt 8 ms before TBTT */
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, 8 << 4);
+ mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
+ MT_DFS_GP_INTERVAL);
+ mt76_wr(dev, MT_INT_TIMER_EN, 0);
+
+ mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
+
static int
-mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
int idx, int n_desc)
{
- int ret;
+ struct mt76_queue *hwq;
+ int err;
- q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->hw_idx = idx;
+ hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
+ if (!hwq)
+ return -ENOMEM;
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
+ if (err < 0)
+ return err;
+
+ INIT_LIST_HEAD(&q->swq);
+ q->q = hwq;
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -175,15 +133,12 @@ static int
mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize)
{
- int ret;
-
- q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE;
- q->ndesc = n_desc;
- q->buf_size = bufsize;
+ int err;
- ret = mt76_queue_alloc(dev, q);
- if (ret)
- return ret;
+ err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
+ MT_RX_RING_BASE);
+ if (err < 0)
+ return err;
mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
@@ -202,15 +157,32 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
static void mt76x02_tx_tasklet(unsigned long data)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
- int i;
+ mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
+ mt76_txq_schedule_all(&dev->mt76);
+}
+
+static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, tx_napi);
+ int i;
+
+ mt76x02_mac_poll_tx_status(dev, false);
+
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
- mt76x02_mac_poll_tx_status(dev, false);
- mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+ if (napi_complete_done(napi, 0))
+ mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);
+
+ for (i = MT_TXQ_MCU; i >= 0; i--)
+ mt76_queue_tx_cleanup(dev, i, false);
+
+ tasklet_schedule(&dev->mt76.tx_tasklet);
+
+ return 0;
}
int mt76x02_dma_init(struct mt76x02_dev *dev)
@@ -220,7 +192,6 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
struct mt76_queue *q;
void *status_fifo;
- BUILD_BUG_ON(sizeof(t->txwi) < sizeof(struct mt76x02_txwi));
BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);
fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
@@ -228,10 +199,12 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (!status_fifo)
return -ENOMEM;
- tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev);
- tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
+ tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
+ (unsigned long) dev);
+ tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
(unsigned long)dev);
+ spin_lock_init(&dev->txstatus_fifo_lock);
kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);
mt76_dma_attach(&dev->mt76);
@@ -268,7 +241,15 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- return mt76_init_queues(dev);
+ ret = mt76_init_queues(dev);
+ if (ret)
+ return ret;
+
+ netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&dev->tx_napi);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);
@@ -296,11 +277,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
intr &= dev->mt76.mmio.irqmask;
- if (intr & MT_INT_TX_DONE_ALL) {
- mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
- tasklet_schedule(&dev->tx_tasklet);
- }
-
if (intr & MT_INT_RX_DONE(0)) {
mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
napi_schedule(&dev->mt76.napi[0]);
@@ -312,19 +288,22 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
}
if (intr & MT_INT_PRE_TBTT)
- tasklet_schedule(&dev->pre_tbtt_tasklet);
+ tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
/* send buffered multicast frames now */
if (intr & MT_INT_TBTT) {
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, &dev->mt76.q_tx[MT_TXQ_PSD]);
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
}
- if (intr & MT_INT_TX_STAT) {
+ if (intr & MT_INT_TX_STAT)
mt76x02_mac_poll_tx_status(dev, true);
- tasklet_schedule(&dev->tx_tasklet);
+
+ if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
+ mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ napi_schedule(&dev->tx_napi);
}
if (intr & MT_INT_GPTIMER) {
@@ -336,18 +315,6 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);
-void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags);
- dev->mt76.mmio.irqmask &= ~clear;
- dev->mt76.mmio.irqmask |= set;
- mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
- spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags);
-}
-EXPORT_SYMBOL_GPL(mt76x02_set_irq_mask);
-
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
u32 val;
@@ -366,7 +333,8 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
- tasklet_kill(&dev->tx_tasklet);
+ tasklet_kill(&dev->mt76.tx_tasklet);
+ netif_napi_del(&dev->tx_napi);
mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
@@ -403,13 +371,13 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = &dev->mt76.q_tx[i];
+ q = dev->mt76.q_tx[i].q;
if (!q->queued)
continue;
prev_dma_idx = dev->mt76.tx_dma_idx[i];
- dma_idx = ioread32(&q->regs->dma_idx);
+ dma_idx = readl(&q->regs->dma_idx);
dev->mt76.tx_dma_idx[i] = dma_idx;
if (prev_dma_idx == dma_idx)
@@ -472,7 +440,7 @@ static void mt76x02_reset_state(struct mt76x02_dev *dev)
}
dev->vif_mask = 0;
- dev->beacon_mask = 0;
+ dev->mt76.beacon_mask = 0;
}
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
@@ -484,8 +452,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
ieee80211_stop_queues(dev->mt76.hw);
set_bit(MT76_RESET, &dev->mt76.state);
- tasklet_disable(&dev->pre_tbtt_tasklet);
- tasklet_disable(&dev->tx_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.tx_tasklet);
+ napi_disable(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
napi_disable(&dev->mt76.napi[i]);
@@ -495,7 +464,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (restart)
mt76x02_reset_state(dev);
- if (dev->beacon_mask)
+ if (dev->mt76.beacon_mask)
mt76_clear(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@@ -514,7 +483,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_set(dev, 0x734, 0x3);
if (restart)
- dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+ mt76_mcu_restart(dev);
for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
mt76_queue_tx_cleanup(dev, i, true);
@@ -527,7 +496,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
if (dev->ed_monitor)
mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
- if (dev->beacon_mask && !restart)
+ if (dev->mt76.beacon_mask && !restart)
mt76_set(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_BEACON_TX |
MT_BEACON_TIME_CFG_TBTT_EN);
@@ -538,10 +507,11 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mt76.state);
- tasklet_enable(&dev->tx_tasklet);
- tasklet_schedule(&dev->tx_tasklet);
+ tasklet_enable(&dev->mt76.tx_tasklet);
+ napi_enable(&dev->tx_napi);
+ napi_schedule(&dev->tx_napi);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
napi_enable(&dev->mt76.napi[i]);
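
The MMIO beacon setup above programs MT_INT_TIMER_CFG_PRE_TBTT with 8 << 4; given the 1/16 TU (64 us) timer unit noted in the old resync comment, that value corresponds to the advertised 8 ms lead. A minimal standalone check of that arithmetic (plain user-space C, not driver code):

/* Sanity check of the pre-TBTT lead time, assuming 1/16 TU (64 us) units. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int reg_val = 8 << 4;   /* value written to MT_INT_TIMER_CFG_PRE_TBTT */
	unsigned int unit_usec = 64;     /* one timer unit = 1/16 TU = 64 us */
	unsigned int lead_usec = reg_val * unit_usec;

	assert(lead_usec == 8192);       /* roughly the "8 ms before TBTT" in the comment */
	printf("pre-TBTT lead: %u us\n", lead_usec);
	return 0;
}
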
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
index 7401cb94fb72..2ce05b543dff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_regs.h
@@ -356,7 +356,10 @@
#define MT_BEACON_TIME_CFG_TSF_COMP GENMASK(31, 24)
#define MT_TBTT_SYNC_CFG 0x1118
-#define MT_TBTT_TIMER_CFG 0x1124
+#define MT_TSF_TIMER_DW0 0x111c
+#define MT_TSF_TIMER_DW1 0x1120
+#define MT_TBTT_TIMER 0x1124
+#define MT_TBTT_TIMER_VAL GENMASK(16, 0)
#define MT_INT_TIMER_CFG 0x1128
#define MT_INT_TIMER_CFG_PRE_TBTT GENMASK(15, 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
index 94f47248c59f..cf7abd9b7d2e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c
@@ -147,36 +147,33 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update)
EXPORT_SYMBOL_GPL(mt76x02_tx_status_data);
int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt76x02_txwi *txwi = txwi_ptr;
- int qsel = MT_QSEL_EDCA;
- int pid;
- int ret;
+ int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
- if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128)
+ if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
mt76x02_mac_wcid_set_drop(dev, wcid->idx, false);
- mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len);
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ len = tx_info->skb->len - (hdrlen & 2);
+ mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
- pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
txwi->pktid = pid;
- ret = mt76x02_insert_hdr_pad(skb);
- if (ret < 0)
- return ret;
-
if (pid >= MT_PACKET_ID_FIRST)
qsel = MT_QSEL_MGMT;
- *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
- MT_TXD_INFO_80211;
+ tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |
+ MT_TXD_INFO_80211;
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
- *tx_info |= MT_TXD_INFO_WIV;
+ tx_info->info |= MT_TXD_INFO_WIV;
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index 0126e51d77ed..7b53f9e57f29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -26,9 +26,11 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags);
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info);
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush);
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e);
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 6fb52b596d42..6b89f7eab26c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -26,8 +26,8 @@ static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
mt76x02_remove_hdr_pad(skb, 2);
}
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
- struct mt76_queue_entry *e, bool flush)
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
+ struct mt76_queue_entry *e)
{
mt76x02u_remove_dma_hdr(e->skb);
mt76_tx_complete_skb(mdev, e->skb);
@@ -72,27 +72,26 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
- struct sk_buff *skb, struct mt76_queue *q,
- struct mt76_wcid *wcid, struct ieee80211_sta *sta,
- u32 *tx_info)
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
struct mt76x02_txwi *txwi;
enum mt76_qsel qsel;
- int len = skb->len;
u32 flags;
- int pid;
- mt76x02_insert_hdr_pad(skb);
+ mt76_insert_hdr_pad(tx_info->skb);
- txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
- mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
- skb_push(skb, sizeof(struct mt76x02_txwi));
+ txwi = (struct mt76x02_txwi *)(tx_info->skb->data - sizeof(*txwi));
+ mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
+ skb_push(tx_info->skb, sizeof(*txwi));
- pid = mt76_tx_status_skb_add(mdev, wcid, skb);
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
txwi->pktid = pid;
- if (pid >= MT_PACKET_ID_FIRST || q2ep(q->hw_idx) == MT_EP_OUT_HCCA)
+ if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
qsel = MT_QSEL_MGMT;
else
qsel = MT_QSEL_EDCA;
@@ -102,6 +101,167 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv)
flags |= MT_TXD_INFO_WIV;
- return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags);
+ return mt76x02u_skb_dma_info(tx_info->skb, WLAN_PORT, flags);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb);
+
+/* Trigger pre-TBTT event 8 ms before TBTT */
+#define PRE_TBTT_USEC 8000
+
+/* Beacon SRAM memory is limited to 8kB. We need to send PS buffered frames
+ * (which can be up to 1500 bytes) via beacon memory, which limits the number
+ * of slots to 5. TODO: dynamically calculate offsets in beacon SRAM.
+ */
+#define N_BCN_SLOTS 5
+
+static void mt76x02u_start_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ u64 time;
+ u32 tbtt;
+
+ /* Get remaining TBTT in usec */
+ tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+ tbtt *= 32;
+
+ if (tbtt <= PRE_TBTT_USEC) {
+ queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+ return;
+ }
+
+ time = (tbtt - PRE_TBTT_USEC) * 1000ull;
+ hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_restart_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ u32 tbtt, dw0, dw1;
+ u64 tsf, time;
+
+ /* Get remaining TBTT in usec */
+ tbtt = mt76_get_field(dev, MT_TBTT_TIMER, MT_TBTT_TIMER_VAL);
+ tbtt *= 32;
+
+ dw0 = mt76_rr(dev, MT_TSF_TIMER_DW0);
+ dw1 = mt76_rr(dev, MT_TSF_TIMER_DW1);
+ tsf = (u64)dw1 << 32 | dw0;
+ dev_dbg(dev->mt76.dev, "TSF: %llu us TBTT %u us\n", tsf, tbtt);
+
+ /* Convert the beacon interval from TU (1024 usec) to nsec */
+ time = ((1000000000ull * dev->mt76.beacon_int) >> 10);
+
+ /* Adjust time to trigger hrtimer 8ms before TBTT */
+ if (tbtt < PRE_TBTT_USEC)
+ time -= (PRE_TBTT_USEC - tbtt) * 1000ull;
+ else
+ time += (tbtt - PRE_TBTT_USEC) * 1000ull;
+
+ hrtimer_start(&dev->pre_tbtt_timer, time, HRTIMER_MODE_REL);
+}
+
+static void mt76x02u_stop_pre_tbtt_timer(struct mt76x02_dev *dev)
+{
+ do {
+ hrtimer_cancel(&dev->pre_tbtt_timer);
+ cancel_work_sync(&dev->pre_tbtt_work);
+ /* The timer can be rearmed by the work item. */
+ } while (hrtimer_active(&dev->pre_tbtt_timer));
+}
+
+static void mt76x02u_pre_tbtt_work(struct work_struct *work)
+{
+ struct mt76x02_dev *dev =
+ container_of(work, struct mt76x02_dev, pre_tbtt_work);
+ struct beacon_bc_data data = {};
+ struct sk_buff *skb;
+ int i, nbeacons;
+
+ if (!dev->mt76.beacon_mask)
+ return;
+
+ if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ return;
+
+ mt76x02_resync_beacon_timer(dev);
+
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt76x02_update_beacon_iter, dev);
+
+ nbeacons = hweight8(dev->mt76.beacon_mask);
+ mt76x02_enqueue_buffered_bc(dev, &data, N_BCN_SLOTS - nbeacons);
+
+ for (i = nbeacons; i < N_BCN_SLOTS; i++) {
+ skb = __skb_dequeue(&data.q);
+ mt76x02_mac_set_beacon(dev, i, skb);
+ }
+
+ mt76x02u_restart_pre_tbtt_timer(dev);
+}
+
+static enum hrtimer_restart mt76x02u_pre_tbtt_interrupt(struct hrtimer *timer)
+{
+ struct mt76x02_dev *dev =
+ container_of(timer, struct mt76x02_dev, pre_tbtt_timer);
+
+ queue_work(system_highpri_wq, &dev->pre_tbtt_work);
+
+ return HRTIMER_NORESTART;
+}
+
+static void mt76x02u_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
+{
+ if (en && dev->mt76.beacon_mask &&
+ !hrtimer_active(&dev->pre_tbtt_timer))
+ mt76x02u_start_pre_tbtt_timer(dev);
+ if (!en)
+ mt76x02u_stop_pre_tbtt_timer(dev);
+}
+
+static void mt76x02u_beacon_enable(struct mt76x02_dev *dev, bool en)
+{
+ int i;
+
+ if (WARN_ON_ONCE(!dev->mt76.beacon_int))
+ return;
+
+ if (en) {
+ mt76x02u_start_pre_tbtt_timer(dev);
+ } else {
+ /* The timer is already stopped; only clean up
+ * PS buffered frames, if any.
+ */
+ for (i = 0; i < N_BCN_SLOTS; i++)
+ mt76x02_mac_set_beacon(dev, i, NULL);
+ }
+}
+
+void mt76x02u_init_beacon_config(struct mt76x02_dev *dev)
+{
+ static const struct mt76x02_beacon_ops beacon_ops = {
+ .nslots = N_BCN_SLOTS,
+ .slot_size = (8192 / N_BCN_SLOTS) & ~63,
+ .pre_tbtt_enable = mt76x02u_pre_tbtt_enable,
+ .beacon_enable = mt76x02u_beacon_enable,
+ };
+ dev->beacon_ops = &beacon_ops;
+
+ hrtimer_init(&dev->pre_tbtt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ dev->pre_tbtt_timer.function = mt76x02u_pre_tbtt_interrupt;
+ INIT_WORK(&dev->pre_tbtt_work, mt76x02u_pre_tbtt_work);
+
+ mt76x02_init_beacon_config(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_init_beacon_config);
+
+void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev)
+{
+ if (!test_bit(MT76_REMOVED, &dev->mt76.state))
+ mt76_clear(dev, MT_BEACON_TIME_CFG,
+ MT_BEACON_TIME_CFG_TIMER_EN |
+ MT_BEACON_TIME_CFG_SYNC_MODE |
+ MT_BEACON_TIME_CFG_TBTT_EN |
+ MT_BEACON_TIME_CFG_BEACON_TX);
+
+ mt76x02u_stop_pre_tbtt_timer(dev);
+}
+EXPORT_SYMBOL_GPL(mt76x02u_exit_beacon_config);
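
The USB beacon path above packs PS buffered frames into the 8 kB beacon SRAM and arms an hrtimer PRE_TBTT_USEC ahead of the hardware TBTT, reading the remaining time from MT_TBTT_TIMER_VAL in 32 us steps. A minimal standalone sketch of both calculations; the register value below is a made-up example, not something read from hardware:

/* Illustration of the USB beacon slot size and the pre-TBTT hrtimer delay. */
#include <stdint.h>
#include <stdio.h>

#define N_BCN_SLOTS	5
#define PRE_TBTT_USEC	8000

int main(void)
{
	unsigned int slot_size = (8192 / N_BCN_SLOTS) & ~63;	/* 64-byte aligned slot */
	uint32_t tbtt_reg = 2000;	/* made-up MT_TBTT_TIMER_VAL, counted in 32 us units */
	uint32_t tbtt_usec = tbtt_reg * 32;
	uint64_t delay_ns;

	printf("slot size %u bytes, %d slots use %u of 8192 bytes\n",
	       slot_size, N_BCN_SLOTS, slot_size * N_BCN_SLOTS);

	if (tbtt_usec > PRE_TBTT_USEC) {
		delay_ns = (uint64_t)(tbtt_usec - PRE_TBTT_USEC) * 1000;
		printf("arm hrtimer to fire in %llu ns\n", (unsigned long long)delay_ns);
	} else {
		printf("TBTT too close, run the pre-TBTT work immediately\n");
	}
	return 0;
}
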
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index cd072ac614f7..ad5323447ed4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -132,7 +132,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
struct ieee80211_hw *hw = mt76_hw(dev);
struct wiphy *wiphy = hw->wiphy;
- INIT_DELAYED_WORK(&dev->mac_work, mt76x02_mac_work);
+ INIT_DELAYED_WORK(&dev->mt76.mac_work, mt76x02_mac_work);
hw->queues = 4;
hw->max_rates = 1;
@@ -142,6 +142,7 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
@@ -158,7 +159,6 @@ void mt76x02_init_device(struct mt76x02_dev *dev)
wiphy->reg_notifier = mt76x02_regd_notifier;
wiphy->iface_combinations = mt76x02_if_comb;
wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb);
- wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
/* init led callbacks */
@@ -378,7 +378,7 @@ int mt76x02_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
break;
case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = *ssn << 4;
+ mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(*ssn);
ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -424,6 +424,16 @@ int mt76x02_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
+ /*
+ * In USB AP mode, broadcast/multicast frames are set up in the beacon
+ * data registers and sent via the HW beacon engine, so they must
+ * already be encrypted.
+ */
+ if (mt76_is_usb(dev) &&
+ vif->type == NL80211_IFTYPE_AP &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
msta = sta ? (struct mt76x02_sta *) sta->drv_priv : NULL;
wcid = msta ? &msta->wcid : &mvif->group_wcid;
@@ -465,7 +475,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue].hw_idx;
+ qid = dev->mt76.q_tx[queue].q->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
@@ -562,26 +572,9 @@ void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw,
rate.idx = rates->rate[0].idx;
rate.flags = rates->rate[0].flags;
mt76x02_mac_wcid_set_rate(dev, &msta->wcid, &rate);
- msta->wcid.max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, &rate);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_rate_tbl_update);
-int mt76x02_insert_hdr_pad(struct sk_buff *skb)
-{
- int len = ieee80211_get_hdrlen_from_skb(skb);
-
- if (len % 4 == 0)
- return 0;
-
- skb_push(skb, 2);
- memmove(skb->data, skb->data + 2, len);
-
- skb->data[len] = 0;
- skb->data[len + 1] = 0;
- return 2;
-}
-EXPORT_SYMBOL_GPL(mt76x02_insert_hdr_pad);
-
void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len)
{
int hdrlen;
@@ -600,8 +593,6 @@ void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt76x02_dev *dev = hw->priv;
- if (mt76_is_mmio(dev))
- tasklet_disable(&dev->pre_tbtt_tasklet);
set_bit(MT76_SCANNING, &dev->mt76.state);
}
EXPORT_SYMBOL_GPL(mt76x02_sw_scan);
@@ -612,9 +603,6 @@ void mt76x02_sw_scan_complete(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
clear_bit(MT76_SCANNING, &dev->mt76.state);
- if (mt76_is_mmio(dev))
- tasklet_enable(&dev->pre_tbtt_tasklet);
-
if (dev->cal.gain_init_done) {
/* Restore AGC gain and resume calibration after scanning. */
dev->cal.low_gain = -1;
@@ -631,72 +619,11 @@ void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta,
int idx = msta->wcid.idx;
mt76_stop_tx_queues(&dev->mt76, sta, true);
- mt76x02_mac_wcid_set_drop(dev, idx, ps);
+ if (mt76_is_mmio(dev))
+ mt76x02_mac_wcid_set_drop(dev, idx, ps);
}
EXPORT_SYMBOL_GPL(mt76x02_sta_ps);
-const u16 mt76x02_beacon_offsets[16] = {
- /* 1024 byte per beacon */
- 0xc000,
- 0xc400,
- 0xc800,
- 0xcc00,
- 0xd000,
- 0xd400,
- 0xd800,
- 0xdc00,
- /* BSS idx 8-15 not used for beacons */
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
- 0xc000,
-};
-
-static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev)
-{
- u16 val, base = MT_BEACON_BASE;
- u32 regs[4] = {};
- int i;
-
- for (i = 0; i < 16; i++) {
- val = mt76x02_beacon_offsets[i] - base;
- regs[i / 4] |= (val / 64) << (8 * (i % 4));
- }
-
- for (i = 0; i < 4; i++)
- mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]);
-}
-
-void mt76x02_init_beacon_config(struct mt76x02_dev *dev)
-{
- int i;
-
- if (mt76_is_mmio(dev)) {
- /* Fire a pre-TBTT interrupt 8 ms before TBTT */
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
- 8 << 4);
- mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
- MT_DFS_GP_INTERVAL);
- mt76_wr(dev, MT_INT_TIMER_EN, 0);
- }
-
- mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN |
- MT_BEACON_TIME_CFG_TBTT_EN |
- MT_BEACON_TIME_CFG_BEACON_TX));
- mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE);
- mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff);
-
- for (i = 0; i < 8; i++)
- mt76x02_mac_set_beacon(dev, i, NULL);
-
- mt76x02_set_beacon_offsets(dev);
-}
-EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config);
-
void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -718,7 +645,7 @@ void mt76x02_bss_info_changed(struct ieee80211_hw *hw,
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL,
info->beacon_int << 4);
- dev->beacon_int = info->beacon_int;
+ dev->mt76.beacon_int = info->beacon_int;
}
if (changed & BSS_CHANGED_BEACON_ENABLED)
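
The agg_ssn change in mt76x02_ampdu_action above swaps the open-coded *ssn << 4 shift for IEEE80211_SN_TO_SEQ(), which additionally masks the result to the sequence-number bits of the sequence-control field. A small user-space check with the macro re-created locally (paraphrased here so it compiles outside the kernel):

/* Check that the masked conversion matches the plain shift for valid SNs. */
#include <assert.h>
#include <stdio.h>

#define SCTL_SEQ	0xFFF0
#define SN_TO_SEQ(ssn)	(((ssn) << 4) & SCTL_SEQ)

int main(void)
{
	unsigned int ssn = 0x5a;	/* example 12-bit starting sequence number */

	assert(SN_TO_SEQ(ssn) == (ssn << 4));	/* identical for in-range SNs */
	assert(SN_TO_SEQ(0x1005) == 0x0050);	/* oversized values are masked off */
	printf("agg_ssn = 0x%04x\n", SN_TO_SEQ(ssn));
	return 0;
}
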
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index a30ef2c5a9db..c6078e90ca43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -165,27 +165,21 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
struct ieee80211_channel *chan;
struct mt76x2_tx_power_info txp;
struct mt76_rate_power t = {};
- int target_power;
int i;
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
mt76x2_get_power_info(dev, &txp, chan);
-
- target_power = max_t(int, (txp.chain[0].target_power +
- txp.chain[0].delta),
- (txp.chain[1].target_power +
- txp.chain[1].delta));
-
mt76x2_get_rate_power(dev, &t, chan);
chan->max_power = mt76x02_get_max_rate_power(&t) +
- target_power;
- chan->max_power /= 2;
+ txp.target_power;
+ chan->max_power = DIV_ROUND_UP(chan->max_power, 2);
/* convert to combined output power on 2x2 devices */
chan->max_power += 3;
+ chan->orig_mpwr = chan->max_power;
}
}
EXPORT_SYMBOL_GPL(mt76x2_init_txpower);
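
The mt76x2_init_txpower change above derives chan->max_power from the per-rate maximum plus the EEPROM target power, halving with DIV_ROUND_UP and adding 3 dB for 2x2 combining. A worked example with made-up EEPROM values, assuming both inputs are in 0.5 dB steps as the halving suggests:

/* Worked example of the max_power computation; inputs are placeholders. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int max_rate_power = 9;		/* example, 0.5 dB units (4.5 dB) */
	int target_power = 26;		/* example, 0.5 dB units (13 dBm) */
	int max_power;

	max_power = DIV_ROUND_UP(max_rate_power + target_power, 2);
	max_power += 3;			/* combined output power on 2x2 devices */

	printf("chan->max_power = %d dBm\n", max_power);	/* 18 + 3 = 21 */
	return 0;
}
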
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 6274655e1f7e..e84d5c5911ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -32,6 +32,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = sizeof(struct mt76x02_txwi),
+ .tx_aligned4_skbs = true,
.update_survey = mt76x02_update_channel,
.tx_prepare_skb = mt76x02_tx_prepare_skb,
.tx_complete_skb = mt76x02_tx_complete_skb,
@@ -106,7 +107,7 @@ mt76pci_remove(struct pci_dev *pdev)
mt76_unregister_device(mdev);
mt76x2_cleanup(dev);
- ieee80211_free_hw(mdev->hw);
+ mt76_free_device(mdev);
}
MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index d3927a13e92e..71aea2832644 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -120,7 +120,7 @@ int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN);
mt76x02_mac_setaddr(dev, macaddr);
- mt76x02_init_beacon_config(dev);
+ mt76x02e_init_beacon_config(dev);
if (!hard)
return 0;
@@ -291,7 +291,7 @@ static int mt76x2_init_hardware(struct mt76x02_dev *dev)
void mt76x2_stop_hardware(struct mt76x02_dev *dev)
{
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
cancel_delayed_work_sync(&dev->wdt_work);
mt76x02_mcu_set_radio_state(dev, false);
mt76x2_mac_stop(dev, false);
@@ -300,7 +300,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev)
void mt76x2_cleanup(struct mt76x02_dev *dev)
{
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
mt76x02_dma_cleanup(dev);
mt76x02_mcu_cleanup(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index 878ce92405ed..e416eee6a306 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -22,26 +22,21 @@ mt76x2_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x2_mac_start(dev);
if (ret)
- goto out;
+ return ret;
ret = mt76x2_phy_start(dev);
if (ret)
- goto out;
+ return ret;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
MT_WATCHDOG_TIME);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
+ return 0;
}
static void
@@ -49,10 +44,8 @@ mt76x2_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
mt76x2_stop_hardware(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static int
@@ -66,7 +59,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_set_channel(&dev->mt76);
- tasklet_disable(&dev->pre_tbtt_tasklet);
+ tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mt76x2_mac_stop(dev, true);
@@ -80,7 +73,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x2_mac_resume(dev);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
- tasklet_enable(&dev->pre_tbtt_tasklet);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
clear_bit(MT76_RESET, &dev->mt76.state);
@@ -135,12 +128,6 @@ mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
}
-static int
-mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
-{
- return 0;
-}
-
static int mt76x2_set_antenna(struct ieee80211_hw *hw, u32 tx_ant,
u32 rx_ant)
{
@@ -197,7 +184,7 @@ const struct ieee80211_ops mt76x2_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.set_coverage_class = mt76x02_set_coverage_class,
.get_survey = mt76_get_survey,
- .set_tim = mt76x2_set_tim,
+ .set_tim = mt76_set_tim,
.set_antenna = mt76x2_set_antenna,
.get_antenna = mt76x2_get_antenna,
.set_rts_threshold = mt76x02_set_rts_threshold,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 769a9b972044..cdedf95ca4f5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -161,12 +161,12 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
delta = txp.delta_bw80;
mt76x2_get_rate_power(dev, &t, chan);
- mt76x02_add_rate_power_offset(&t, txp.chain[0].target_power);
+ mt76x02_add_rate_power_offset(&t, txp.target_power + delta);
mt76x02_limit_rate_power(&t, dev->mt76.txpower_conf);
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(&t);
base_power = mt76x2_get_min_rate_power(&t);
- delta += base_power - txp.chain[0].target_power;
+ delta = base_power - txp.target_power;
txp_0 = txp.chain[0].target_power + txp.chain[0].delta + delta;
txp_1 = txp.chain[1].target_power + txp.chain[1].delta + delta;
@@ -182,7 +182,7 @@ void mt76x2_phy_set_txpower(struct mt76x02_dev *dev)
}
mt76x02_add_rate_power_offset(&t, -base_power);
- dev->target_power = txp.chain[0].target_power;
+ dev->target_power = txp.target_power;
dev->target_power_delta[0] = txp_0 - txp.chain[0].target_power;
dev->target_power_delta[1] = txp_1 - txp.chain[0].target_power;
dev->mt76.rate_power = t;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ac0f13d46299..7a994a783510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -40,6 +40,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
.tx_complete_skb = mt76x02u_tx_complete_skb,
.tx_status_data = mt76x02_tx_status_data,
.rx_skb = mt76x02_queue_rx_skb,
+ .sta_ps = mt76x02_sta_ps,
.sta_add = mt76x02_sta_add,
.sta_remove = mt76x02_sta_remove,
};
@@ -48,7 +49,7 @@ static int mt76x2u_probe(struct usb_interface *intf,
struct mt76_dev *mdev;
int err;
- mdev = mt76_alloc_device(&intf->dev, sizeof(*dev), &mt76x2u_ops,
+ mdev = mt76_alloc_device(&udev->dev, sizeof(*dev), &mt76x2u_ops,
&drv_ops);
if (!mdev)
return -ENOMEM;
@@ -58,6 +59,8 @@ static int mt76x2u_probe(struct usb_interface *intf,
udev = usb_get_dev(udev);
usb_reset_device(udev);
+ usb_set_intfdata(intf, dev);
+
mt76x02u_init_mcu(mdev);
err = mt76u_init(mdev, intf);
if (err < 0)
@@ -104,8 +107,7 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
{
struct mt76x02_dev *dev = usb_get_intfdata(intf);
- mt76u_stop_queues(&dev->mt76);
- mt76x2u_stop_hw(dev);
+ mt76u_stop_rx(&dev->mt76);
return 0;
}
@@ -113,16 +115,12 @@ static int __maybe_unused mt76x2u_suspend(struct usb_interface *intf,
static int __maybe_unused mt76x2u_resume(struct usb_interface *intf)
{
struct mt76x02_dev *dev = usb_get_intfdata(intf);
- struct mt76_usb *usb = &dev->mt76.usb;
int err;
- err = mt76u_submit_rx_buffers(&dev->mt76);
+ err = mt76u_resume_rx(&dev->mt76);
if (err < 0)
goto err;
- tasklet_enable(&usb->rx_tasklet);
- tasklet_enable(&usb->tx_tasklet);
-
err = mt76x2u_init_hardware(dev);
if (err < 0)
goto err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
index 1da90e58d942..f2c57d5b87f9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
@@ -183,7 +183,7 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev)
mt76x02_mac_shared_key_setup(dev, i, k, NULL);
}
- mt76x02_init_beacon_config(dev);
+ mt76x02u_init_beacon_config(dev);
mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e);
mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x583f);
@@ -244,9 +244,8 @@ fail:
void mt76x2u_stop_hw(struct mt76x02_dev *dev)
{
- mt76u_stop_stat_wk(&dev->mt76);
cancel_delayed_work_sync(&dev->cal_work);
- cancel_delayed_work_sync(&dev->mac_work);
+ cancel_delayed_work_sync(&dev->mt76.mac_work);
mt76x2u_mac_stop(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 2ac78e4dc41a..97bcf6494ec1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -21,29 +21,24 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
struct mt76x02_dev *dev = hw->priv;
int ret;
- mutex_lock(&dev->mt76.mutex);
-
ret = mt76x2u_mac_start(dev);
if (ret)
- goto out;
+ return ret;
- ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mt76.mac_work,
MT_MAC_WORK_INTERVAL);
set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
-out:
- mutex_unlock(&dev->mt76.mutex);
- return ret;
+ return 0;
}
static void mt76x2u_stop(struct ieee80211_hw *hw)
{
struct mt76x02_dev *dev = hw->priv;
- mutex_lock(&dev->mt76.mutex);
clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+ mt76u_stop_tx(&dev->mt76);
mt76x2u_stop_hw(dev);
- mutex_unlock(&dev->mt76.mutex);
}
static int
@@ -57,6 +52,8 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76_set_channel(&dev->mt76);
+ dev->beacon_ops->pre_tbtt_enable(dev, false);
+
mt76x2_mac_stop(dev, false);
err = mt76x2u_phy_set_channel(dev, chandef);
@@ -64,6 +61,8 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
mt76x2_mac_resume(dev);
mt76x02_edcca_init(dev, true);
+ dev->beacon_ops->pre_tbtt_enable(dev, true);
+
clear_bit(MT76_RESET, &dev->mt76.state);
mt76_txq_schedule_all(&dev->mt76);
@@ -125,4 +124,6 @@ const struct ieee80211_ops mt76x2u_ops = {
.sw_scan_complete = mt76x02_sw_scan_complete,
.sta_rate_tbl_update = mt76x02_sta_rate_tbl_update,
.get_txpower = mt76_get_txpower,
+ .set_tim = mt76_set_tim,
+ .release_buffered_frames = mt76_release_buffered_frames,
};
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 2585df512335..5397827668b9 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -21,15 +21,17 @@ mt76_alloc_txwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
dma_addr_t addr;
+ u8 *txwi;
int size;
- size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
- t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
- if (!t)
+ size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+ txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!txwi)
return NULL;
- addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
+ addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
DMA_TO_DEVICE);
+ t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
t->dma_addr = addr;
return t;
@@ -72,13 +74,14 @@ mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
list_add(&t->list, &dev->txwi_cache);
spin_unlock_bh(&dev->lock);
}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
void mt76_tx_free(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
+ dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
DMA_TO_DEVICE);
}
@@ -266,7 +269,7 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
skb_set_queue_mapping(skb, qid);
}
- if (!wcid->tx_rate_set)
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
@@ -283,10 +286,10 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
mt76_check_agg_ssn(mtxq, skb);
}
- q = &dev->q_tx[qid];
+ q = dev->q_tx[qid].q;
spin_lock_bh(&q->lock);
- dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
@@ -327,7 +330,6 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
{
struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
if (last)
@@ -335,7 +337,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
- dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
+ dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}
void
@@ -346,7 +348,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
{
struct mt76_dev *dev = hw->priv;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
int i;
spin_lock_bh(&hwq->lock);
@@ -386,12 +388,14 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
+mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
struct mt76_txq *mtxq, bool *empty)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
- struct ieee80211_tx_info *info;
+ enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_tx_info *info;
struct sk_buff *skb;
int n_frames = 1, limit;
struct ieee80211_tx_rate tx_rate;
@@ -411,7 +415,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
}
info = IEEE80211_SKB_CB(skb);
- if (!wcid->tx_rate_set)
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
tx_rate = info->control.rates[0];
@@ -423,7 +427,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
if (idx < 0)
return idx;
@@ -458,7 +462,7 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
if (cur_ampdu)
mt76_check_agg_ssn(mtxq, skb);
- idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
txq->sta);
if (idx < 0)
return idx;
@@ -467,8 +471,9 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
} while (n_frames < limit);
if (!probe) {
- hwq->swq_queued++;
+ hwq->entry[idx].qid = sq - dev->q_tx;
hwq->entry[idx].schedule = true;
+ sq->swq_queued++;
}
dev->queue_ops->kick(dev, hwq);
@@ -477,22 +482,37 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
}
static int
-mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
+mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- struct mt76_txq *mtxq, *mtxq_last;
- int len = 0;
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
+ struct mt76_queue *hwq = sq->q;
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ struct mt76_wcid *wcid;
+ int ret = 0;
-restart:
- mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
- while (!list_empty(&hwq->swq)) {
+ spin_lock_bh(&hwq->lock);
+ while (1) {
bool empty = false;
- int cur;
+
+ if (sq->swq_queued >= 4)
+ break;
if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
- test_bit(MT76_RESET, &dev->state))
- return -EBUSY;
+ test_bit(MT76_RESET, &dev->state)) {
+ ret = -EBUSY;
+ break;
+ }
+
+ txq = ieee80211_next_txq(dev->hw, qid);
+ if (!txq)
+ break;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ wcid = mtxq->wcid;
+ if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+ continue;
- mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
if (mtxq->send_bar && mtxq->aggr) {
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_sta *sta = txq->sta;
@@ -504,38 +524,37 @@ restart:
spin_unlock_bh(&hwq->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
spin_lock_bh(&hwq->lock);
- goto restart;
}
- list_del_init(&mtxq->list);
-
- cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
- if (!empty)
- list_add_tail(&mtxq->list, &hwq->swq);
-
- if (cur < 0)
- return cur;
-
- len += cur;
-
- if (mtxq == mtxq_last)
- break;
+ ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
+ if (skb_queue_empty(&mtxq->retry_q))
+ empty = true;
+ ieee80211_return_txq(dev->hw, txq, !empty);
}
+ spin_unlock_bh(&hwq->lock);
- return len;
+ return ret;
}
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
+ struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
+ if (qid >= 4)
+ return;
+
+ if (sq->swq_queued >= 4)
+ return;
+
rcu_read_lock();
- do {
- if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
- break;
- len = mt76_txq_schedule_list(dev, hwq);
+ do {
+ ieee80211_txq_schedule_start(dev->hw, qid);
+ len = mt76_txq_schedule_list(dev, qid);
+ ieee80211_txq_schedule_end(dev->hw, qid);
} while (len > 0);
+
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);
@@ -544,13 +563,8 @@ void mt76_txq_schedule_all(struct mt76_dev *dev)
{
int i;
- for (i = 0; i <= MT_TXQ_BK; i++) {
- struct mt76_queue *q = &dev->q_tx[i];
-
- spin_lock_bh(&q->lock);
- mt76_txq_schedule(dev, q);
- spin_unlock_bh(&q->lock);
- }
+ for (i = 0; i <= MT_TXQ_BK; i++)
+ mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
@@ -561,18 +575,18 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
struct ieee80211_txq *txq = sta->txq[i];
+ struct mt76_queue *hwq;
struct mt76_txq *mtxq;
if (!txq)
continue;
mtxq = (struct mt76_txq *)txq->drv_priv;
+ hwq = mtxq->swq->q;
- spin_lock_bh(&mtxq->hwq->lock);
+ spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
- if (!list_empty(&mtxq->list))
- list_del_init(&mtxq->list);
- spin_unlock_bh(&mtxq->hwq->lock);
+ spin_unlock_bh(&hwq->lock);
}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
@@ -580,36 +594,23 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
struct mt76_dev *dev = hw->priv;
- struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- struct mt76_queue *hwq = mtxq->hwq;
if (!test_bit(MT76_STATE_RUNNING, &dev->state))
return;
- spin_lock_bh(&hwq->lock);
- if (list_empty(&mtxq->list))
- list_add_tail(&mtxq->list, &hwq->swq);
- mt76_txq_schedule(dev, hwq);
- spin_unlock_bh(&hwq->lock);
+ tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq;
- struct mt76_queue *hwq;
struct sk_buff *skb;
if (!txq)
return;
mtxq = (struct mt76_txq *) txq->drv_priv;
- hwq = mtxq->hwq;
-
- spin_lock_bh(&hwq->lock);
- if (!list_empty(&mtxq->list))
- list_del_init(&mtxq->list);
- spin_unlock_bh(&hwq->lock);
while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
ieee80211_free_txskb(dev->hw, skb);
@@ -620,10 +621,9 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
- INIT_LIST_HEAD(&mtxq->list);
skb_queue_head_init(&mtxq->retry_q);
- mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
+ mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
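
The mt76_alloc_txwi() change earlier in this file folds the txwi buffer and its bookkeeping struct into a single cache-line-aligned allocation, with the DMA-mapped txwi at the front. A standalone sketch of that layout using placeholder sizes (the real sizes depend on the driver):

/* Layout sketch for the combined txwi + cache-entry allocation. */
#include <stdio.h>

#define L1_CACHE_BYTES		64
#define L1_CACHE_ALIGN(x)	(((x) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1))

int main(void)
{
	size_t txwi_size = 32;	/* placeholder for drv->txwi_size */
	size_t cache_size = 24;	/* placeholder for sizeof(struct mt76_txwi_cache) */
	size_t alloc = L1_CACHE_ALIGN(txwi_size + cache_size);

	/* The DMA-mapped txwi occupies the start of the buffer and the
	 * bookkeeping struct sits right behind it, so one allocation covers
	 * both and is padded out to a full cache line.
	 */
	printf("alloc %zu bytes: txwi [0, %zu), cache struct [%zu, %zu)\n",
	       alloc, txwi_size, txwi_size, txwi_size + cache_size);
	return 0;
}
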
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 4c1abd492405..bbaa1365bbda 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -31,8 +31,7 @@ static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = to_usb_device(dev->dev);
unsigned int pipe;
int i, ret;
@@ -247,8 +246,7 @@ mt76u_rd_rp(struct mt76_dev *dev, u32 base,
static bool mt76u_check_sg(struct mt76_dev *dev)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_device *udev = to_usb_device(dev->dev);
return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
(udev->bus->no_sg_constraint ||
@@ -285,28 +283,24 @@ mt76u_set_endpoints(struct usb_interface *intf,
}
static int
-mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
- int nsgs, int len, int sglen)
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+ int nsgs, gfp_t gfp)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- struct urb *urb = buf->urb;
int i;
- spin_lock_bh(&q->rx_page_lock);
for (i = 0; i < nsgs; i++) {
struct page *page;
void *data;
int offset;
- data = page_frag_alloc(&q->rx_page, len, GFP_ATOMIC);
+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
if (!data)
break;
page = virt_to_head_page(data);
offset = data - page_address(page);
- sg_set_page(&urb->sg[i], page, sglen, offset);
+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
}
- spin_unlock_bh(&q->rx_page_lock);
if (i < nsgs) {
int j;
@@ -317,72 +311,78 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
}
urb->num_sgs = max_t(int, i, urb->num_sgs);
- buf->len = urb->num_sgs * sglen,
+ urb->transfer_buffer_length = urb->num_sgs * q->buf_size,
sg_init_marker(urb->sg, urb->num_sgs);
return i ? : -ENOMEM;
}
static int
-mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct mt76u_buf *buf, int nsgs, gfp_t gfp)
+mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+
if (dev->usb.sg_en) {
- return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size,
- SKB_WITH_OVERHEAD(q->buf_size));
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
} else {
- buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
- return buf->buf ? 0 : -ENOMEM;
+ urb->transfer_buffer_length = q->buf_size;
+ urb->transfer_buffer = page_frag_alloc(&q->rx_page,
+ q->buf_size, gfp);
+ return urb->transfer_buffer ? 0 : -ENOMEM;
}
}
static int
-mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf)
+mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ unsigned int size = sizeof(struct urb);
- buf->len = SKB_WITH_OVERHEAD(q->buf_size);
- buf->dev = dev;
+ if (dev->usb.sg_en)
+ size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);
- buf->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!buf->urb)
+ e->urb = kzalloc(size, GFP_KERNEL);
+ if (!e->urb)
return -ENOMEM;
- if (dev->usb.sg_en) {
- buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE,
- sizeof(*buf->urb->sg),
- GFP_KERNEL);
- if (!buf->urb->sg)
- return -ENOMEM;
+ usb_init_urb(e->urb);
- sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE);
- }
+ if (dev->usb.sg_en)
+ e->urb->sg = (struct scatterlist *)(e->urb + 1);
- return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL);
+ return 0;
}
-static void mt76u_buf_free(struct mt76u_buf *buf)
+static int
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
+{
+ int err;
+
+ err = mt76u_urb_alloc(dev, e);
+ if (err)
+ return err;
+
+ return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
+}
+
+static void mt76u_urb_free(struct urb *urb)
{
- struct urb *urb = buf->urb;
int i;
for (i = 0; i < urb->num_sgs; i++)
skb_free_frag(sg_virt(&urb->sg[i]));
- if (buf->buf)
- skb_free_frag(buf->buf);
+ if (urb->transfer_buffer)
+ skb_free_frag(urb->transfer_buffer);
- usb_free_urb(buf->urb);
+ usb_free_urb(urb);
}
static void
mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, usb_complete_t complete_fn,
+ struct urb *urb, usb_complete_t complete_fn,
void *context)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
- u8 *data = buf->urb->num_sgs ? NULL : buf->buf;
+ struct usb_device *udev = to_usb_device(dev->dev);
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -390,37 +390,28 @@ mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
else
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
- usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len,
- complete_fn, context);
+ urb->dev = udev;
+ urb->pipe = pipe;
+ urb->complete = complete_fn;
+ urb->context = context;
}
-static int
-mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context)
+static inline struct urb *
+mt76u_get_next_rx_entry(struct mt76_dev *dev)
{
- mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn,
- context);
- trace_submit_urb(dev, buf->urb);
-
- return usb_submit_urb(buf->urb, gfp);
-}
-
-static inline struct mt76u_buf
-*mt76u_get_next_rx_entry(struct mt76_queue *q)
-{
- struct mt76u_buf *buf = NULL;
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct urb *urb = NULL;
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->queued > 0) {
- buf = &q->entry[q->head].ubuf;
+ urb = q->entry[q->head].urb;
q->head = (q->head + 1) % q->ndesc;
q->queued--;
}
spin_unlock_irqrestore(&q->lock, flags);
- return buf;
+ return urb;
}
static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
@@ -439,12 +430,12 @@ static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
}
static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- struct urb *urb = buf->urb;
- u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : buf->buf;
- int data_len, len, nsgs = 1;
+ u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
+ int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
+ int len, nsgs = 1;
struct sk_buff *skb;
if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
@@ -454,10 +445,11 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct mt76u_buf *buf)
if (len < 0)
return 0;
- data_len = urb->num_sgs ? urb->sg[0].length : buf->len;
data_len = min_t(int, len, data_len - MT_DMA_HDR_LEN);
- if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size))
+ if (MT_DMA_HDR_LEN + data_len > SKB_WITH_OVERHEAD(q->buf_size)) {
+ dev_err_ratelimited(dev->dev, "rx data too big %d\n", data_len);
return 0;
+ }
skb = build_skb(data, q->buf_size);
if (!skb)
@@ -503,7 +495,7 @@ static void mt76u_complete_rx(struct urb *urb)
}
spin_lock_irqsave(&q->lock, flags);
- if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
goto out;
q->tail = (q->tail + 1) % q->ndesc;
@@ -513,37 +505,43 @@ out:
spin_unlock_irqrestore(&q->lock, flags);
}
+static int
+mt76u_submit_rx_buf(struct mt76_dev *dev, struct urb *urb)
+{
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, MT_EP_IN_PKT_RX, urb,
+ mt76u_complete_rx, dev);
+ trace_submit_urb(dev, urb);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
static void mt76u_rx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- struct mt76u_buf *buf;
+ struct urb *urb;
int err, count;
rcu_read_lock();
while (true) {
- buf = mt76u_get_next_rx_entry(q);
- if (!buf)
+ urb = mt76u_get_next_rx_entry(dev);
+ if (!urb)
break;
- count = mt76u_process_rx_entry(dev, buf);
+ count = mt76u_process_rx_entry(dev, urb);
if (count > 0) {
- err = mt76u_refill_rx(dev, q, buf, count,
- GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
- mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
- buf, GFP_ATOMIC,
- mt76u_complete_rx, dev);
+ mt76u_submit_rx_buf(dev, urb);
}
mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
rcu_read_unlock();
}
-int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+static int mt76u_submit_rx_buffers(struct mt76_dev *dev)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
unsigned long flags;
@@ -551,9 +549,7 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
- &q->entry[i].ubuf, GFP_ATOMIC,
- mt76u_complete_rx, dev);
+ err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
if (err < 0)
break;
}
@@ -563,7 +559,6 @@ int mt76u_submit_rx_buffers(struct mt76_dev *dev)
return err;
}
-EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
static int mt76u_alloc_rx(struct mt76_dev *dev)
{
@@ -575,7 +570,6 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
if (!usb->mcu.data)
return -ENOMEM;
- spin_lock_init(&q->rx_page_lock);
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -586,7 +580,7 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_buf_alloc(dev, &q->entry[i].ubuf);
+ err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
if (err < 0)
return err;
}
@@ -601,60 +595,76 @@ static void mt76u_free_rx(struct mt76_dev *dev)
int i;
for (i = 0; i < q->ndesc; i++)
- mt76u_buf_free(&q->entry[i].ubuf);
+ mt76u_urb_free(q->entry[i].urb);
- spin_lock_bh(&q->rx_page_lock);
if (!q->rx_page.va)
- goto out;
+ return;
page = virt_to_page(q->rx_page.va);
__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
memset(&q->rx_page, 0, sizeof(q->rx_page));
-out:
- spin_unlock_bh(&q->rx_page_lock);
}
-static void mt76u_stop_rx(struct mt76_dev *dev)
+void mt76u_stop_rx(struct mt76_dev *dev)
{
struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i;
for (i = 0; i < q->ndesc; i++)
- usb_kill_urb(q->entry[i].ubuf.urb);
+ usb_poison_urb(q->entry[i].urb);
+
+ tasklet_kill(&dev->usb.rx_tasklet);
}
+EXPORT_SYMBOL_GPL(mt76u_stop_rx);
+
+int mt76u_resume_rx(struct mt76_dev *dev)
+{
+ struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ usb_unpoison_urb(q->entry[i].urb);
+
+ return mt76u_submit_rx_buffers(dev);
+}
+EXPORT_SYMBOL_GPL(mt76u_resume_rx);
static void mt76u_tx_tasklet(unsigned long data)
{
struct mt76_dev *dev = (struct mt76_dev *)data;
struct mt76_queue_entry entry;
- struct mt76u_buf *buf;
+ struct mt76_sw_queue *sq;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ u32 n_dequeued = 0, n_sw_dequeued = 0;
- spin_lock_bh(&q->lock);
- while (true) {
- buf = &q->entry[q->head].ubuf;
- if (!buf->done || !q->queued)
+ sq = &dev->q_tx[i];
+ q = sq->q;
+
+ while (q->queued > n_dequeued) {
+ if (!q->entry[q->head].done)
break;
if (q->entry[q->head].schedule) {
q->entry[q->head].schedule = false;
- q->swq_queued--;
+ n_sw_dequeued++;
}
entry = q->entry[q->head];
+ q->entry[q->head].done = false;
q->head = (q->head + 1) % q->ndesc;
- q->queued--;
+ n_dequeued++;
- spin_unlock_bh(&q->lock);
- dev->drv->tx_complete_skb(dev, q, &entry, false);
- spin_lock_bh(&q->lock);
+ dev->drv->tx_complete_skb(dev, i, &entry);
}
- mt76_txq_schedule(dev, q);
+
+ spin_lock_bh(&q->lock);
+
+ sq->swq_queued -= n_sw_dequeued;
+ q->queued -= n_dequeued;
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
@@ -665,6 +675,8 @@ static void mt76u_tx_tasklet(unsigned long data)
spin_unlock_bh(&q->lock);
+ mt76_txq_schedule(dev, i);
+
if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
ieee80211_queue_delayed_work(dev->hw,
&dev->usb.stat_work,
@@ -703,34 +715,43 @@ static void mt76u_tx_status_data(struct work_struct *work)
static void mt76u_complete_tx(struct urb *urb)
{
- struct mt76u_buf *buf = urb->context;
- struct mt76_dev *dev = buf->dev;
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue_entry *e = urb->context;
if (mt76u_urb_error(urb))
dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
- buf->done = true;
+ e->done = true;
- tasklet_schedule(&dev->usb.tx_tasklet);
+ tasklet_schedule(&dev->tx_tasklet);
}
static int
-mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb,
- struct urb *urb)
+mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
+ struct urb *urb)
{
- if (!dev->usb.sg_en)
- return 0;
+ urb->transfer_buffer_length = skb->len;
- sg_init_table(urb->sg, MT_SG_MAX_SIZE);
- urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
- return urb->num_sgs;
+ if (!dev->usb.sg_en) {
+ urb->transfer_buffer = skb->data;
+ return 0;
+ } else {
+ sg_init_table(urb->sg, MT_SG_MAX_SIZE);
+ urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
+ if (urb->num_sgs == 0)
+ return -ENOMEM;
+ return urb->num_sgs;
+ }
}
static int
-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76u_buf *buf;
+ struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
u16 idx = q->tail;
int err;
@@ -738,24 +759,20 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
return -ENOSPC;
skb->prev = skb->next = NULL;
- err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
- buf = &q->entry[idx].ubuf;
- buf->buf = skb->data;
- buf->len = skb->len;
- buf->done = false;
-
- err = mt76u_tx_build_sg(dev, skb, buf->urb);
+ err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
if (err < 0)
return err;
mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
- buf, mt76u_complete_tx, buf);
+ q->entry[idx].urb, mt76u_complete_tx,
+ &q->entry[idx]);
q->tail = (q->tail + 1) % q->ndesc;
- q->entry[idx].skb = skb;
+ q->entry[idx].skb = tx_info.skb;
q->queued++;
return idx;
@@ -763,14 +780,14 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76u_buf *buf;
+ struct urb *urb;
int err;
while (q->first != q->tail) {
- buf = &q->entry[q->first].ubuf;
+ urb = q->entry[q->first].urb;
- trace_submit_urb(dev, buf->urb);
- err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ trace_submit_urb(dev, urb);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
set_bit(MT76_REMOVED, &dev->state);
@@ -785,15 +802,24 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
- struct mt76u_buf *buf;
struct mt76_queue *q;
- int i, j;
+ int i, j, err;
+
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
+ INIT_LIST_HEAD(&dev->q_tx[i].swq);
+
+ if (i >= IEEE80211_NUM_ACS) {
+ dev->q_tx[i].q = dev->q_tx[0].q;
+ continue;
+ }
+
+ q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
q->hw_idx = mt76_ac_to_hwq(i);
+ dev->q_tx[i].q = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -803,22 +829,9 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) {
- buf = &q->entry[j].ubuf;
- buf->dev = dev;
-
- buf->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!buf->urb)
- return -ENOMEM;
-
- if (dev->usb.sg_en) {
- size_t size = MT_SG_MAX_SIZE *
- sizeof(struct scatterlist);
-
- buf->urb->sg = devm_kzalloc(dev->dev, size,
- GFP_KERNEL);
- if (!buf->urb->sg)
- return -ENOMEM;
- }
+ err = mt76u_urb_alloc(dev, &q->entry[j]);
+ if (err < 0)
+ return err;
}
}
return 0;
@@ -830,44 +843,60 @@ static void mt76u_free_tx(struct mt76_dev *dev)
int i, j;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ q = dev->q_tx[i].q;
for (j = 0; j < q->ndesc; j++)
- usb_free_urb(q->entry[j].ubuf.urb);
+ usb_free_urb(q->entry[j].urb);
}
}
-static void mt76u_stop_tx(struct mt76_dev *dev)
+void mt76u_stop_tx(struct mt76_dev *dev)
{
+ struct mt76_queue_entry entry;
struct mt76_queue *q;
- int i, j;
+ int i, j, ret;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
- for (j = 0; j < q->ndesc; j++)
- usb_kill_urb(q->entry[j].ubuf.urb);
- }
-}
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(dev), HZ/5);
+ if (!ret) {
+ dev_err(dev->dev, "timed out waiting for pending tx\n");
-void mt76u_stop_queues(struct mt76_dev *dev)
-{
- tasklet_disable(&dev->usb.rx_tasklet);
- tasklet_disable(&dev->usb.tx_tasklet);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i].q;
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].urb);
+ }
- mt76u_stop_rx(dev);
- mt76u_stop_tx(dev);
-}
-EXPORT_SYMBOL_GPL(mt76u_stop_queues);
+ tasklet_kill(&dev->tx_tasklet);
+
+ /* On device removal we might queue skbs, but mt76u_tx_kick()
+ * will fail to submit the urb; clean up those skbs manually.
+ */
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i].q;
+
+ /* Ensure we are in sync with the killed tasklet. */
+ spin_lock_bh(&q->lock);
+ while (q->queued) {
+ entry = q->entry[q->head];
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued--;
+
+ dev->drv->tx_complete_skb(dev, i, &entry);
+ }
+ spin_unlock_bh(&q->lock);
+ }
+ }
-void mt76u_stop_stat_wk(struct mt76_dev *dev)
-{
cancel_delayed_work_sync(&dev->usb.stat_work);
clear_bit(MT76_READING_STATS, &dev->state);
+
+ mt76_tx_status_check(dev, NULL, true);
}
-EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+EXPORT_SYMBOL_GPL(mt76u_stop_tx);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
- mt76u_stop_queues(dev);
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
mt76u_free_rx(dev);
mt76u_free_tx(dev);
@@ -906,7 +935,7 @@ int mt76u_init(struct mt76_dev *dev,
struct mt76_usb *usb = &dev->usb;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
- tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
+ tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 14b569b6d1b5..7cea08f71838 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -13,12 +13,11 @@
#define QTNF_MAX_MAC 3
enum qtnf_fw_state {
- QTNF_FW_STATE_RESET,
- QTNF_FW_STATE_FW_DNLD_DONE,
+ QTNF_FW_STATE_DETACHED,
QTNF_FW_STATE_BOOT_DONE,
QTNF_FW_STATE_ACTIVE,
- QTNF_FW_STATE_DETACHED,
- QTNF_FW_STATE_EP_DEAD,
+ QTNF_FW_STATE_RUNNING,
+ QTNF_FW_STATE_DEAD,
};
struct qtnf_bus;
@@ -50,6 +49,7 @@ struct qtnf_bus {
struct napi_struct mux_napi;
struct net_device mux_dev;
struct workqueue_struct *workqueue;
+ struct workqueue_struct *hprio_workqueue;
struct work_struct fw_work;
struct work_struct event_work;
struct mutex bus_lock; /* lock during command/event processing */
@@ -58,6 +58,23 @@ struct qtnf_bus {
char bus_priv[0] __aligned(sizeof(void *));
};
+static inline bool qtnf_fw_is_up(struct qtnf_bus *bus)
+{
+ enum qtnf_fw_state state = bus->fw_state;
+
+ return ((state == QTNF_FW_STATE_ACTIVE) ||
+ (state == QTNF_FW_STATE_RUNNING));
+}
+
+static inline bool qtnf_fw_is_attached(struct qtnf_bus *bus)
+{
+ enum qtnf_fw_state state = bus->fw_state;
+
+ return ((state == QTNF_FW_STATE_ACTIVE) ||
+ (state == QTNF_FW_STATE_RUNNING) ||
+ (state == QTNF_FW_STATE_DEAD));
+}
+
static inline void *get_bus_priv(struct qtnf_bus *bus)
{
if (WARN(!bus, "qtnfmac: invalid bus pointer"))
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index dcb0991432f4..d90016125dfc 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -144,6 +144,7 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct net_device *netdev = wdev->netdev;
struct qtnf_vif *vif;
+ struct sk_buff *skb;
if (WARN_ON(!netdev))
return -EFAULT;
@@ -157,6 +158,11 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
if (netif_carrier_ok(netdev))
netif_carrier_off(netdev);
+ while ((skb = skb_dequeue(&vif->high_pri_tx_queue)))
+ dev_kfree_skb_any(skb);
+
+ cancel_work_sync(&vif->high_pri_tx_work);
+
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdevice(netdev);
@@ -424,13 +430,13 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*cookie = short_cookie;
if (params->offchan)
- flags |= QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN;
+ flags |= QLINK_FRAME_TX_FLAG_OFFCHAN;
if (params->no_cck)
- flags |= QLINK_MGMT_FRAME_TX_FLAG_NO_CCK;
+ flags |= QLINK_FRAME_TX_FLAG_NO_CCK;
if (params->dont_wait_for_ack)
- flags |= QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT;
+ flags |= QLINK_FRAME_TX_FLAG_ACK_NOWAIT;
/* If channel is not specified, pass "freq = 0" to tell device
* firmware to use current channel.
@@ -445,9 +451,8 @@ qtnf_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
le16_to_cpu(mgmt_frame->frame_control), mgmt_frame->da,
params->len, short_cookie, flags);
- return qtnf_cmd_send_mgmt_frame(vif, short_cookie, flags,
- freq,
- params->buf, params->len);
+ return qtnf_cmd_send_frame(vif, short_cookie, flags,
+ freq, params->buf, params->len);
}
static int
@@ -993,53 +998,31 @@ static struct cfg80211_ops qtn_cfg80211_ops = {
#endif
};
-static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy_in,
+static void qtnf_cfg80211_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *req)
{
- struct qtnf_wmac *mac = wiphy_priv(wiphy_in);
- struct qtnf_bus *bus = mac->bus;
- struct wiphy *wiphy;
- unsigned int mac_idx;
+ struct qtnf_wmac *mac = wiphy_priv(wiphy);
enum nl80211_band band;
int ret;
pr_debug("MAC%u: initiator=%d alpha=%c%c\n", mac->macid, req->initiator,
req->alpha2[0], req->alpha2[1]);
- ret = qtnf_cmd_reg_notify(bus, req);
+ ret = qtnf_cmd_reg_notify(mac, req, qtnf_mac_slave_radar_get(wiphy));
if (ret) {
- if (ret == -EOPNOTSUPP) {
- pr_warn("reg update not supported\n");
- } else if (ret == -EALREADY) {
- pr_info("regulatory domain is already set to %c%c",
- req->alpha2[0], req->alpha2[1]);
- } else {
- pr_err("failed to update reg domain to %c%c\n",
- req->alpha2[0], req->alpha2[1]);
- }
-
+ pr_err("MAC%u: failed to update region to %c%c: %d\n",
+ mac->macid, req->alpha2[0], req->alpha2[1], ret);
return;
}
- for (mac_idx = 0; mac_idx < QTNF_MAX_MAC; ++mac_idx) {
- if (!(bus->hw_info.mac_bitmap & (1 << mac_idx)))
+ for (band = 0; band < NUM_NL80211_BANDS; ++band) {
+ if (!wiphy->bands[band])
continue;
- mac = bus->mac[mac_idx];
- if (!mac)
- continue;
-
- wiphy = priv_to_wiphy(mac);
-
- for (band = 0; band < NUM_NL80211_BANDS; ++band) {
- if (!wiphy->bands[band])
- continue;
-
- ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
- if (ret)
- pr_err("failed to get chan info for mac %u band %u\n",
- mac_idx, band);
- }
+ ret = qtnf_cmd_band_info_get(mac, wiphy->bands[band]);
+ if (ret)
+ pr_err("MAC%u: failed to update band %u\n",
+ mac->macid, band);
}
}
@@ -1095,6 +1078,7 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
struct wiphy *wiphy = priv_to_wiphy(mac);
struct qtnf_mac_info *macinfo = &mac->macinfo;
int ret;
+ bool regdomain_is_known;
if (!wiphy) {
pr_err("invalid wiphy pointer\n");
@@ -1127,7 +1111,8 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
- WIPHY_FLAG_4ADDR_STATION;
+ WIPHY_FLAG_4ADDR_STATION |
+ WIPHY_FLAG_NETNS_OK;
wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (hw_info->hw_capab & QLINK_HW_CAPAB_DFS_OFFLOAD)
@@ -1166,11 +1151,19 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
wiphy->wowlan = macinfo->wowlan;
#endif
+ regdomain_is_known = isalpha(mac->rd->alpha2[0]) &&
+ isalpha(mac->rd->alpha2[1]);
+
if (hw_info->hw_capab & QLINK_HW_CAPAB_REG_UPDATE) {
- wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
- REGULATORY_CUSTOM_REG;
wiphy->reg_notifier = qtnf_cfg80211_reg_notifier;
- wiphy_apply_custom_regulatory(wiphy, hw_info->rd);
+
+ if (mac->rd->alpha2[0] == '9' && mac->rd->alpha2[1] == '9') {
+ wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+ REGULATORY_STRICT_REG;
+ wiphy_apply_custom_regulatory(wiphy, mac->rd);
+ } else if (regdomain_is_known) {
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ }
} else {
wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
}
@@ -1193,10 +1186,9 @@ int qtnf_wiphy_register(struct qtnf_hw_info *hw_info, struct qtnf_wmac *mac)
goto out;
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
- ret = regulatory_set_wiphy_regd(wiphy, hw_info->rd);
- else if (isalpha(hw_info->rd->alpha2[0]) &&
- isalpha(hw_info->rd->alpha2[1]))
- ret = regulatory_hint(wiphy, hw_info->rd->alpha2);
+ ret = regulatory_set_wiphy_regd(wiphy, mac->rd);
+ else if (regdomain_is_known)
+ ret = regulatory_hint(wiphy, mac->rd->alpha2);
out:
return ret;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index 85a2a58f4c16..459f6b81d2eb 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -11,6 +11,13 @@
#include "bus.h"
#include "commands.h"
+#define QTNF_SCAN_TIME_AUTO 0
+
+/* Let the device itself select the best values for current conditions */
+#define QTNF_SCAN_DWELL_ACTIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_DWELL_PASSIVE_DEFAULT QTNF_SCAN_TIME_AUTO
+#define QTNF_SCAN_SAMPLE_DURATION_DEFAULT QTNF_SCAN_TIME_AUTO
+
static int qtnf_cmd_check_reply_header(const struct qlink_resp *resp,
u16 cmd_id, u8 mac_id, u8 vif_id,
size_t resp_size)
@@ -89,8 +96,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus,
pr_debug("VIF%u.%u cmd=0x%.4X\n", mac_id, vif_id, cmd_id);
- if (bus->fw_state != QTNF_FW_STATE_ACTIVE &&
- cmd_id != QLINK_CMD_FW_INIT) {
+ if (!qtnf_fw_is_up(bus) && cmd_id != QLINK_CMD_FW_INIT) {
pr_warn("VIF%u.%u: drop cmd 0x%.4X in fw state %d\n",
mac_id, vif_id, cmd_id, bus->fw_state);
dev_kfree_skb(cmd_skb);
@@ -177,14 +183,6 @@ static void qtnf_cmd_tlv_ie_set_add(struct sk_buff *cmd_skb, u8 frame_type,
memcpy(tlv->ie_data, buf, len);
}
-static inline size_t qtnf_cmd_acl_data_size(const struct cfg80211_acl_data *acl)
-{
- size_t size = sizeof(struct qlink_acl_data) +
- acl->n_acl_entries * sizeof(struct qlink_mac_address);
-
- return size;
-}
-
static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s)
{
@@ -203,7 +201,7 @@ static bool qtnf_cmd_start_ap_can_fit(const struct qtnf_vif *vif,
if (s->acl)
len += sizeof(struct qlink_tlv_hdr) +
- qtnf_cmd_acl_data_size(s->acl);
+ struct_size(s->acl, mac_addrs, s->acl->n_acl_entries);
if (len > (sizeof(struct qlink_cmd) + QTNF_MAX_CMD_BUF_SIZE)) {
pr_err("VIF%u.%u: can not fit AP settings: %u\n",
@@ -310,7 +308,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
}
if (s->acl) {
- size_t acl_size = qtnf_cmd_acl_data_size(s->acl);
+ size_t acl_size = struct_size(s->acl, mac_addrs,
+ s->acl->n_acl_entries);
struct qlink_tlv_hdr *tlv =
skb_put(cmd_skb, sizeof(*tlv) + acl_size);
@@ -382,11 +381,11 @@ out:
return ret;
}
-int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
- u16 freq, const u8 *buf, size_t len)
+int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
+ u16 freq, const u8 *buf, size_t len)
{
struct sk_buff *cmd_skb;
- struct qlink_cmd_mgmt_frame_tx *cmd;
+ struct qlink_cmd_frame_tx *cmd;
int ret;
if (sizeof(*cmd) + len > QTNF_MAX_CMD_BUF_SIZE) {
@@ -396,14 +395,14 @@ int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
}
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
- QLINK_CMD_SEND_MGMT_FRAME,
+ QLINK_CMD_SEND_FRAME,
sizeof(*cmd));
if (!cmd_skb)
return -ENOMEM;
qtnf_bus_lock(vif->mac->bus);
- cmd = (struct qlink_cmd_mgmt_frame_tx *)cmd_skb->data;
+ cmd = (struct qlink_cmd_frame_tx *)cmd_skb->data;
cmd->cookie = cpu_to_le32(cookie);
cmd->freq = cpu_to_le16(freq);
cmd->flags = cpu_to_le16(flags);
@@ -786,8 +785,25 @@ int qtnf_cmd_send_change_intf_type(struct qtnf_vif *vif,
int use4addr,
u8 *mac_addr)
{
- return qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
- QLINK_CMD_CHANGE_INTF);
+ int ret;
+
+ ret = qtnf_cmd_send_add_change_intf(vif, iftype, use4addr, mac_addr,
+ QLINK_CMD_CHANGE_INTF);
+
+ /* Regulatory settings may be different for different interface types */
+ if (ret == 0 && vif->wdev.iftype != iftype) {
+ enum nl80211_band band;
+ struct wiphy *wiphy = priv_to_wiphy(vif->mac);
+
+ for (band = 0; band < NUM_NL80211_BANDS; ++band) {
+ if (!wiphy->bands[band])
+ continue;
+
+ qtnf_cmd_band_info_get(vif->mac, wiphy->bands[band]);
+ }
+ }
+
+ return ret;
}
int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
@@ -831,55 +847,6 @@ out:
return ret;
}
-static u32 qtnf_cmd_resp_reg_rule_flags_parse(u32 qflags)
-{
- u32 flags = 0;
-
- if (qflags & QLINK_RRF_NO_OFDM)
- flags |= NL80211_RRF_NO_OFDM;
-
- if (qflags & QLINK_RRF_NO_CCK)
- flags |= NL80211_RRF_NO_CCK;
-
- if (qflags & QLINK_RRF_NO_INDOOR)
- flags |= NL80211_RRF_NO_INDOOR;
-
- if (qflags & QLINK_RRF_NO_OUTDOOR)
- flags |= NL80211_RRF_NO_OUTDOOR;
-
- if (qflags & QLINK_RRF_DFS)
- flags |= NL80211_RRF_DFS;
-
- if (qflags & QLINK_RRF_PTP_ONLY)
- flags |= NL80211_RRF_PTP_ONLY;
-
- if (qflags & QLINK_RRF_PTMP_ONLY)
- flags |= NL80211_RRF_PTMP_ONLY;
-
- if (qflags & QLINK_RRF_NO_IR)
- flags |= NL80211_RRF_NO_IR;
-
- if (qflags & QLINK_RRF_AUTO_BW)
- flags |= NL80211_RRF_AUTO_BW;
-
- if (qflags & QLINK_RRF_IR_CONCURRENT)
- flags |= NL80211_RRF_IR_CONCURRENT;
-
- if (qflags & QLINK_RRF_NO_HT40MINUS)
- flags |= NL80211_RRF_NO_HT40MINUS;
-
- if (qflags & QLINK_RRF_NO_HT40PLUS)
- flags |= NL80211_RRF_NO_HT40PLUS;
-
- if (qflags & QLINK_RRF_NO_80MHZ)
- flags |= NL80211_RRF_NO_80MHZ;
-
- if (qflags & QLINK_RRF_NO_160MHZ)
- flags |= NL80211_RRF_NO_160MHZ;
-
- return flags;
-}
-
static int
qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const struct qlink_resp_get_hw_info *resp,
@@ -887,7 +854,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
{
struct qtnf_hw_info *hwinfo = &bus->hw_info;
const struct qlink_tlv_hdr *tlv;
- const struct qlink_tlv_reg_rule *tlv_rule;
const char *bld_name = NULL;
const char *bld_rev = NULL;
const char *bld_type = NULL;
@@ -898,19 +864,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
const char *calibration_ver = NULL;
const char *uboot_ver = NULL;
u32 hw_ver = 0;
- struct ieee80211_reg_rule *rule;
u16 tlv_type;
u16 tlv_value_len;
- unsigned int rule_idx = 0;
-
- if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
- return -E2BIG;
-
- hwinfo->rd = kzalloc(struct_size(hwinfo->rd, reg_rules,
- resp->n_reg_rules), GFP_KERNEL);
-
- if (!hwinfo->rd)
- return -ENOMEM;
hwinfo->num_mac = resp->num_mac;
hwinfo->mac_bitmap = resp->mac_bitmap;
@@ -919,30 +874,11 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
hwinfo->total_tx_chain = resp->total_tx_chain;
hwinfo->total_rx_chain = resp->total_rx_chain;
hwinfo->hw_capab = le32_to_cpu(resp->hw_capab);
- hwinfo->rd->n_reg_rules = resp->n_reg_rules;
- hwinfo->rd->alpha2[0] = resp->alpha2[0];
- hwinfo->rd->alpha2[1] = resp->alpha2[1];
bld_tmstamp = le32_to_cpu(resp->bld_tmstamp);
plat_id = le32_to_cpu(resp->plat_id);
hw_ver = le32_to_cpu(resp->hw_ver);
- switch (resp->dfs_region) {
- case QLINK_DFS_FCC:
- hwinfo->rd->dfs_region = NL80211_DFS_FCC;
- break;
- case QLINK_DFS_ETSI:
- hwinfo->rd->dfs_region = NL80211_DFS_ETSI;
- break;
- case QLINK_DFS_JP:
- hwinfo->rd->dfs_region = NL80211_DFS_JP;
- break;
- case QLINK_DFS_UNSET:
- default:
- hwinfo->rd->dfs_region = NL80211_DFS_UNSET;
- break;
- }
-
tlv = (const struct qlink_tlv_hdr *)resp->info;
while (info_len >= sizeof(*tlv)) {
@@ -956,37 +892,6 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
}
switch (tlv_type) {
- case QTN_TLV_ID_REG_RULE:
- if (rule_idx >= resp->n_reg_rules) {
- pr_warn("unexpected number of rules: %u\n",
- resp->n_reg_rules);
- return -EINVAL;
- }
-
- if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) {
- pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
- tlv_type, tlv_value_len);
- return -EINVAL;
- }
-
- tlv_rule = (const struct qlink_tlv_reg_rule *)tlv;
- rule = &hwinfo->rd->reg_rules[rule_idx++];
-
- rule->freq_range.start_freq_khz =
- le32_to_cpu(tlv_rule->start_freq_khz);
- rule->freq_range.end_freq_khz =
- le32_to_cpu(tlv_rule->end_freq_khz);
- rule->freq_range.max_bandwidth_khz =
- le32_to_cpu(tlv_rule->max_bandwidth_khz);
- rule->power_rule.max_antenna_gain =
- le32_to_cpu(tlv_rule->max_antenna_gain);
- rule->power_rule.max_eirp =
- le32_to_cpu(tlv_rule->max_eirp);
- rule->dfs_cac_ms =
- le32_to_cpu(tlv_rule->dfs_cac_ms);
- rule->flags = qtnf_cmd_resp_reg_rule_flags_parse(
- le32_to_cpu(tlv_rule->flags));
- break;
case QTN_TLV_ID_BUILD_NAME:
bld_name = (const void *)tlv->val;
break;
@@ -1019,17 +924,8 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
tlv = (struct qlink_tlv_hdr *)(tlv->val + tlv_value_len);
}
- if (rule_idx != resp->n_reg_rules) {
- pr_warn("unexpected number of rules: expected %u got %u\n",
- resp->n_reg_rules, rule_idx);
- kfree(hwinfo->rd);
- hwinfo->rd = NULL;
- return -EINVAL;
- }
-
- pr_info("fw_version=%d, MACs map %#x, alpha2=\"%c%c\", chains Tx=%u Rx=%u, capab=0x%x\n",
+ pr_info("fw_version=%d, MACs map %#x, chains Tx=%u Rx=%u, capab=0x%x\n",
hwinfo->fw_ver, hwinfo->mac_bitmap,
- hwinfo->rd->alpha2[0], hwinfo->rd->alpha2[1],
hwinfo->total_tx_chain, hwinfo->total_rx_chain,
hwinfo->hw_capab);
@@ -1042,7 +938,7 @@ qtnf_cmd_resp_proc_hw_info(struct qtnf_bus *bus,
"\nHardware ID: %s" \
"\nCalibration version: %s" \
"\nU-Boot version: %s" \
- "\nHardware version: 0x%08x",
+ "\nHardware version: 0x%08x\n",
bld_name, bld_rev, bld_type, bld_label,
(unsigned long)bld_tmstamp,
(unsigned long)plat_id,
@@ -1085,9 +981,12 @@ qtnf_parse_wowlan_info(struct qtnf_wmac *mac,
}
}
-static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
- const u8 *tlv_buf, size_t tlv_buf_size)
+static int
+qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
+ const struct qlink_resp_get_mac_info *resp,
+ size_t tlv_buf_size)
{
+ const u8 *tlv_buf = resp->var_info;
struct ieee80211_iface_combination *comb = NULL;
size_t n_comb = 0;
struct ieee80211_iface_limit *limits;
@@ -1105,6 +1004,38 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
u8 ext_capa_len = 0;
u8 ext_capa_mask_len = 0;
int i = 0;
+ struct ieee80211_reg_rule *rule;
+ unsigned int rule_idx = 0;
+ const struct qlink_tlv_reg_rule *tlv_rule;
+
+ if (WARN_ON(resp->n_reg_rules > NL80211_MAX_SUPP_REG_RULES))
+ return -E2BIG;
+
+ mac->rd = kzalloc(sizeof(*mac->rd) +
+ sizeof(struct ieee80211_reg_rule) *
+ resp->n_reg_rules, GFP_KERNEL);
+ if (!mac->rd)
+ return -ENOMEM;
+
+ mac->rd->n_reg_rules = resp->n_reg_rules;
+ mac->rd->alpha2[0] = resp->alpha2[0];
+ mac->rd->alpha2[1] = resp->alpha2[1];
+
+ switch (resp->dfs_region) {
+ case QLINK_DFS_FCC:
+ mac->rd->dfs_region = NL80211_DFS_FCC;
+ break;
+ case QLINK_DFS_ETSI:
+ mac->rd->dfs_region = NL80211_DFS_ETSI;
+ break;
+ case QLINK_DFS_JP:
+ mac->rd->dfs_region = NL80211_DFS_JP;
+ break;
+ case QLINK_DFS_UNSET:
+ default:
+ mac->rd->dfs_region = NL80211_DFS_UNSET;
+ break;
+ }
tlv = (const struct qlink_tlv_hdr *)tlv_buf;
while (tlv_buf_size >= sizeof(struct qlink_tlv_hdr)) {
@@ -1225,6 +1156,23 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
mac->macinfo.wowlan = NULL;
qtnf_parse_wowlan_info(mac, wowlan);
break;
+ case QTN_TLV_ID_REG_RULE:
+ if (rule_idx >= resp->n_reg_rules) {
+ pr_warn("unexpected number of rules: %u\n",
+ resp->n_reg_rules);
+ return -EINVAL;
+ }
+
+ if (tlv_value_len != sizeof(*tlv_rule) - sizeof(*tlv)) {
+ pr_warn("malformed TLV 0x%.2X; LEN: %u\n",
+ tlv_type, tlv_value_len);
+ return -EINVAL;
+ }
+
+ tlv_rule = (const struct qlink_tlv_reg_rule *)tlv;
+ rule = &mac->rd->reg_rules[rule_idx++];
+ qlink_utils_regrule_q2nl(rule, tlv_rule);
+ break;
default:
pr_warn("MAC%u: unknown TLV type %u\n",
mac->macid, tlv_type);
@@ -1253,6 +1201,12 @@ static int qtnf_parse_variable_mac_info(struct qtnf_wmac *mac,
return -EINVAL;
}
+ if (rule_idx != resp->n_reg_rules) {
+ pr_warn("unexpected number of rules: expected %u got %u\n",
+ resp->n_reg_rules, rule_idx);
+ return -EINVAL;
+ }
+
if (ext_capa_len > 0) {
ext_capa = kmemdup(ext_capa, ext_capa_len, GFP_KERNEL);
if (!ext_capa)
@@ -1663,7 +1617,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac)
resp = (const struct qlink_resp_get_mac_info *)resp_skb->data;
qtnf_cmd_resp_proc_mac_info(mac, resp);
- ret = qtnf_parse_variable_mac_info(mac, resp->var_info, var_data_len);
+ ret = qtnf_parse_variable_mac_info(mac, resp, var_data_len);
out:
qtnf_bus_unlock(mac->bus);
@@ -1709,21 +1663,7 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac,
struct qlink_resp_band_info_get *resp;
size_t info_len = 0;
int ret = 0;
- u8 qband;
-
- switch (band->band) {
- case NL80211_BAND_2GHZ:
- qband = QLINK_BAND_2GHZ;
- break;
- case NL80211_BAND_5GHZ:
- qband = QLINK_BAND_5GHZ;
- break;
- case NL80211_BAND_60GHZ:
- qband = QLINK_BAND_60GHZ;
- break;
- default:
- return -EINVAL;
- }
+ u8 qband = qlink_utils_band_cfg2q(band->band);
cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0,
QLINK_CMD_BAND_INFO_GET,
@@ -2107,22 +2047,23 @@ out:
static void qtnf_cmd_channel_tlv_add(struct sk_buff *cmd_skb,
const struct ieee80211_channel *sc)
{
- struct qlink_tlv_channel *qchan;
- u32 flags = 0;
-
- qchan = skb_put_zero(cmd_skb, sizeof(*qchan));
- qchan->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
- qchan->hdr.len = cpu_to_le16(sizeof(*qchan) - sizeof(qchan->hdr));
- qchan->chan.center_freq = cpu_to_le16(sc->center_freq);
- qchan->chan.hw_value = cpu_to_le16(sc->hw_value);
-
- if (sc->flags & IEEE80211_CHAN_NO_IR)
- flags |= QLINK_CHAN_NO_IR;
-
- if (sc->flags & IEEE80211_CHAN_RADAR)
- flags |= QLINK_CHAN_RADAR;
-
- qchan->chan.flags = cpu_to_le32(flags);
+ struct qlink_tlv_channel *tlv;
+ struct qlink_channel *qch;
+
+ tlv = skb_put_zero(cmd_skb, sizeof(*tlv));
+ qch = &tlv->chan;
+ tlv->hdr.type = cpu_to_le16(QTN_TLV_ID_CHANNEL);
+ tlv->hdr.len = cpu_to_le16(sizeof(*qch));
+
+ qch->center_freq = cpu_to_le16(sc->center_freq);
+ qch->hw_value = cpu_to_le16(sc->hw_value);
+ qch->band = qlink_utils_band_cfg2q(sc->band);
+ qch->max_power = sc->max_power;
+ qch->max_reg_power = sc->max_reg_power;
+ qch->max_antenna_gain = sc->max_antenna_gain;
+ qch->beacon_found = sc->beacon_found;
+ qch->dfs_state = qlink_utils_dfs_state_cfg2q(sc->dfs_state);
+ qch->flags = cpu_to_le32(qlink_utils_chflags_cfg2q(sc->flags));
}
static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
@@ -2141,6 +2082,35 @@ static void qtnf_cmd_randmac_tlv_add(struct sk_buff *cmd_skb,
memcpy(randmac->mac_addr_mask, mac_addr_mask, ETH_ALEN);
}
+static void qtnf_cmd_scan_set_dwell(struct qtnf_wmac *mac,
+ struct sk_buff *cmd_skb)
+{
+ struct cfg80211_scan_request *scan_req = mac->scan_req;
+ u16 dwell_active = QTNF_SCAN_DWELL_ACTIVE_DEFAULT;
+ u16 dwell_passive = QTNF_SCAN_DWELL_PASSIVE_DEFAULT;
+ u16 duration = QTNF_SCAN_SAMPLE_DURATION_DEFAULT;
+
+ if (scan_req->duration) {
+ dwell_active = scan_req->duration;
+ dwell_passive = scan_req->duration;
+ }
+
+ pr_debug("MAC%u: %s scan dwell active=%u, passive=%u, duration=%u\n",
+ mac->macid,
+ scan_req->duration_mandatory ? "mandatory" : "max",
+ dwell_active, dwell_passive, duration);
+
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb,
+ QTN_TLV_ID_SCAN_DWELL_ACTIVE,
+ dwell_active);
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb,
+ QTN_TLV_ID_SCAN_DWELL_PASSIVE,
+ dwell_passive);
+ qtnf_cmd_skb_put_tlv_u16(cmd_skb,
+ QTN_TLV_ID_SCAN_SAMPLE_DURATION,
+ duration);
+}
+
int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
{
struct sk_buff *cmd_skb;
@@ -2192,6 +2162,8 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
}
}
+ qtnf_cmd_scan_set_dwell(mac, cmd_skb);
+
if (scan_req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
pr_debug("MAC%u: scan with random addr=%pM, mask=%pM\n",
mac->macid,
@@ -2207,15 +2179,6 @@ int qtnf_cmd_send_scan(struct qtnf_wmac *mac)
qtnf_cmd_skb_put_tlv_tag(cmd_skb, QTN_TLV_ID_SCAN_FLUSH);
}
- if (scan_req->duration) {
- pr_debug("MAC%u: %s scan duration %u\n", mac->macid,
- scan_req->duration_mandatory ? "mandatory" : "max",
- scan_req->duration);
-
- qtnf_cmd_skb_put_tlv_u16(cmd_skb, QTN_TLV_ID_SCAN_DWELL,
- scan_req->duration);
- }
-
ret = qtnf_cmd_send(mac->bus, cmd_skb);
if (ret)
goto out;
@@ -2404,13 +2367,18 @@ out:
return ret;
}
-int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
+int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
+ bool slave_radar)
{
+ struct wiphy *wiphy = priv_to_wiphy(mac);
+ struct qtnf_bus *bus = mac->bus;
struct sk_buff *cmd_skb;
int ret;
struct qlink_cmd_reg_notify *cmd;
+ enum nl80211_band band;
+ const struct ieee80211_supported_band *cfg_band;
- cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD,
+ cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD,
QLINK_CMD_REG_NOTIFY,
sizeof(*cmd));
if (!cmd_skb)
@@ -2447,12 +2415,41 @@ int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req)
break;
}
+ switch (req->dfs_region) {
+ case NL80211_DFS_FCC:
+ cmd->dfs_region = QLINK_DFS_FCC;
+ break;
+ case NL80211_DFS_ETSI:
+ cmd->dfs_region = QLINK_DFS_ETSI;
+ break;
+ case NL80211_DFS_JP:
+ cmd->dfs_region = QLINK_DFS_JP;
+ break;
+ default:
+ cmd->dfs_region = QLINK_DFS_UNSET;
+ break;
+ }
+
+ cmd->slave_radar = slave_radar;
+ cmd->num_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ unsigned int i;
+
+ cfg_band = wiphy->bands[band];
+ if (!cfg_band)
+ continue;
+
+ cmd->num_channels += cfg_band->n_channels;
+
+ for (i = 0; i < cfg_band->n_channels; ++i) {
+ qtnf_cmd_channel_tlv_add(cmd_skb,
+ &cfg_band->channels[i]);
+ }
+ }
+
qtnf_bus_lock(bus);
ret = qtnf_cmd_send(bus, cmd_skb);
- if (ret)
- goto out;
-
-out:
qtnf_bus_unlock(bus);
return ret;
@@ -2592,7 +2589,7 @@ int qtnf_cmd_set_mac_acl(const struct qtnf_vif *vif,
struct qtnf_bus *bus = vif->mac->bus;
struct sk_buff *cmd_skb;
struct qlink_tlv_hdr *tlv;
- size_t acl_size = qtnf_cmd_acl_data_size(params);
+ size_t acl_size = struct_size(params, mac_addrs, params->n_acl_entries);
int ret;
cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.h b/drivers/net/wireless/quantenna/qtnfmac/commands.h
index 64f0b9dc8a14..88d7a3cd90d2 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.h
@@ -27,8 +27,8 @@ int qtnf_cmd_send_start_ap(struct qtnf_vif *vif,
const struct cfg80211_ap_settings *s);
int qtnf_cmd_send_stop_ap(struct qtnf_vif *vif);
int qtnf_cmd_send_register_mgmt(struct qtnf_vif *vif, u16 frame_type, bool reg);
-int qtnf_cmd_send_mgmt_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
- u16 freq, const u8 *buf, size_t len);
+int qtnf_cmd_send_frame(struct qtnf_vif *vif, u32 cookie, u16 flags,
+ u16 freq, const u8 *buf, size_t len);
int qtnf_cmd_send_mgmt_set_appie(struct qtnf_vif *vif, u8 frame_type,
const u8 *buf, size_t len);
int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac,
@@ -57,7 +57,8 @@ int qtnf_cmd_send_disconnect(struct qtnf_vif *vif,
u16 reason_code);
int qtnf_cmd_send_updown_intf(struct qtnf_vif *vif,
bool up);
-int qtnf_cmd_reg_notify(struct qtnf_bus *bus, struct regulatory_request *req);
+int qtnf_cmd_reg_notify(struct qtnf_wmac *mac, struct regulatory_request *req,
+ bool slave_radar);
int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel,
struct qtnf_chan_stats *stats);
int qtnf_cmd_send_chan_switch(struct qtnf_vif *vif,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index ee1b75fda1dd..8d699cc03d26 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -16,6 +16,12 @@
#define QTNF_DMP_MAX_LEN 48
#define QTNF_PRIMARY_VIF_IDX 0
+static bool slave_radar = true;
+module_param(slave_radar, bool, 0644);
+MODULE_PARM_DESC(slave_radar, "set 0 to disable radar detection in slave mode");
+
+static struct dentry *qtnf_debugfs_dir;
+
struct qtnf_frame_meta_info {
u8 magic_s;
u8 ifidx;
@@ -368,6 +374,23 @@ static void qtnf_mac_scan_timeout(struct work_struct *work)
qtnf_mac_scan_finish(mac, true);
}
+static void qtnf_vif_send_data_high_pri(struct work_struct *work)
+{
+ struct qtnf_vif *vif =
+ container_of(work, struct qtnf_vif, high_pri_tx_work);
+ struct sk_buff *skb;
+
+ if (!vif->netdev ||
+ vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
+ return;
+
+ while ((skb = skb_dequeue(&vif->high_pri_tx_queue))) {
+ qtnf_cmd_send_frame(vif, 0, QLINK_FRAME_TX_FLAG_8023,
+ 0, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+}
+
static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
unsigned int macid)
{
@@ -395,7 +418,8 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
vif->mac = mac;
vif->vifid = i;
qtnf_sta_list_init(&vif->sta_list);
-
+ INIT_WORK(&vif->high_pri_tx_work, qtnf_vif_send_data_high_pri);
+ skb_queue_head_init(&vif->high_pri_tx_queue);
vif->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vif->stats64)
pr_warn("VIF%u.%u: per cpu stats allocation failed\n",
@@ -408,6 +432,11 @@ static struct qtnf_wmac *qtnf_core_mac_alloc(struct qtnf_bus *bus,
return mac;
}
+bool qtnf_mac_slave_radar_get(struct wiphy *wiphy)
+{
+ return slave_radar;
+}
+
static const struct ethtool_ops qtnf_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
};
@@ -499,6 +528,8 @@ static void qtnf_core_mac_detach(struct qtnf_bus *bus, unsigned int macid)
qtnf_mac_iface_comb_free(mac);
qtnf_mac_ext_caps_free(mac);
kfree(mac->macinfo.wowlan);
+ kfree(mac->rd);
+ mac->rd = NULL;
wiphy_free(wiphy);
bus->mac[macid] = NULL;
}
@@ -587,8 +618,6 @@ int qtnf_core_attach(struct qtnf_bus *bus)
int ret;
qtnf_trans_init(bus);
-
- bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
qtnf_bus_data_rx_start(bus);
bus->workqueue = alloc_ordered_workqueue("QTNF_BUS", 0);
@@ -598,6 +627,13 @@ int qtnf_core_attach(struct qtnf_bus *bus)
goto error;
}
+ bus->hprio_workqueue = alloc_workqueue("QTNF_HPRI", WQ_HIGHPRI, 0);
+ if (!bus->hprio_workqueue) {
+ pr_err("failed to alloc high prio workqueue\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
INIT_WORK(&bus->event_work, qtnf_event_work_handler);
ret = qtnf_cmd_send_init_fw(bus);
@@ -607,7 +643,6 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
bus->fw_state = QTNF_FW_STATE_ACTIVE;
-
ret = qtnf_cmd_get_hw_info(bus);
if (ret) {
pr_err("failed to get HW info: %d\n", ret);
@@ -637,11 +672,11 @@ int qtnf_core_attach(struct qtnf_bus *bus)
}
}
+ bus->fw_state = QTNF_FW_STATE_RUNNING;
return 0;
error:
qtnf_core_detach(bus);
-
return ret;
}
EXPORT_SYMBOL_GPL(qtnf_core_attach);
@@ -655,7 +690,7 @@ void qtnf_core_detach(struct qtnf_bus *bus)
for (macid = 0; macid < QTNF_MAX_MAC; macid++)
qtnf_core_mac_detach(bus, macid);
- if (bus->fw_state == QTNF_FW_STATE_ACTIVE)
+ if (qtnf_fw_is_up(bus))
qtnf_cmd_send_deinit_fw(bus);
bus->fw_state = QTNF_FW_STATE_DETACHED;
@@ -663,10 +698,14 @@ void qtnf_core_detach(struct qtnf_bus *bus)
if (bus->workqueue) {
flush_workqueue(bus->workqueue);
destroy_workqueue(bus->workqueue);
+ bus->workqueue = NULL;
}
- kfree(bus->hw_info.rd);
- bus->hw_info.rd = NULL;
+ if (bus->hprio_workqueue) {
+ flush_workqueue(bus->hprio_workqueue);
+ destroy_workqueue(bus->hprio_workqueue);
+ bus->hprio_workqueue = NULL;
+ }
qtnf_trans_free(bus);
}
@@ -684,6 +723,9 @@ struct net_device *qtnf_classify_skb(struct qtnf_bus *bus, struct sk_buff *skb)
struct qtnf_wmac *mac;
struct qtnf_vif *vif;
+ if (unlikely(bus->fw_state != QTNF_FW_STATE_RUNNING))
+ return NULL;
+
meta = (struct qtnf_frame_meta_info *)
(skb_tail_pointer(skb) - sizeof(*meta));
@@ -799,6 +841,39 @@ void qtnf_update_tx_stats(struct net_device *ndev, const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(qtnf_update_tx_stats);
+void qtnf_packet_send_hi_pri(struct sk_buff *skb)
+{
+ struct qtnf_vif *vif = qtnf_netdev_get_priv(skb->dev);
+
+ skb_queue_tail(&vif->high_pri_tx_queue, skb);
+ queue_work(vif->mac->bus->hprio_workqueue, &vif->high_pri_tx_work);
+}
+EXPORT_SYMBOL_GPL(qtnf_packet_send_hi_pri);
+
+struct dentry *qtnf_get_debugfs_dir(void)
+{
+ return qtnf_debugfs_dir;
+}
+EXPORT_SYMBOL_GPL(qtnf_get_debugfs_dir);
+
+static int __init qtnf_core_register(void)
+{
+ qtnf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+ if (IS_ERR(qtnf_debugfs_dir))
+ qtnf_debugfs_dir = NULL;
+
+ return 0;
+}
+
+static void __exit qtnf_core_exit(void)
+{
+ debugfs_remove(qtnf_debugfs_dir);
+}
+
+module_init(qtnf_core_register);
+module_exit(qtnf_core_exit);
+
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna 802.11 wireless LAN FullMAC driver.");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.h b/drivers/net/wireless/quantenna/qtnfmac/core.h
index a31cff46e964..322858df600c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.h
@@ -63,6 +63,8 @@ struct qtnf_vif {
struct qtnf_wmac *mac;
struct work_struct reset_work;
+ struct work_struct high_pri_tx_work;
+ struct sk_buff_head high_pri_tx_queue;
struct qtnf_sta_list sta_list;
unsigned long cons_tx_timeout_cnt;
int generation;
@@ -112,6 +114,7 @@ struct qtnf_wmac {
struct cfg80211_scan_request *scan_req;
struct mutex mac_lock; /* lock during wmac speicific ops */
struct delayed_work scan_timeout;
+ struct ieee80211_regdomain *rd;
};
struct qtnf_hw_info {
@@ -120,7 +123,6 @@ struct qtnf_hw_info {
u8 mac_bitmap;
u32 fw_ver;
u32 hw_capab;
- struct ieee80211_regdomain *rd;
u8 total_tx_chain;
u8 total_rx_chain;
char fw_version[ETHTOOL_FWVERS_LEN];
@@ -132,6 +134,7 @@ struct qtnf_vif *qtnf_mac_get_free_vif(struct qtnf_wmac *mac);
struct qtnf_vif *qtnf_mac_get_base_vif(struct qtnf_wmac *mac);
void qtnf_mac_iface_comb_free(struct qtnf_wmac *mac);
void qtnf_mac_ext_caps_free(struct qtnf_wmac *mac);
+bool qtnf_mac_slave_radar_get(struct wiphy *wiphy);
struct wiphy *qtnf_wiphy_allocate(struct qtnf_bus *bus);
int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *priv,
const char *name, unsigned char name_assign_type);
@@ -149,6 +152,8 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev);
void qtnf_netdev_updown(struct net_device *ndev, bool up);
void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted);
+void qtnf_packet_send_hi_pri(struct sk_buff *skb);
+struct dentry *qtnf_get_debugfs_dir(void);
static inline struct qtnf_vif *qtnf_netdev_get_priv(struct net_device *dev)
{
diff --git a/drivers/net/wireless/quantenna/qtnfmac/debug.c b/drivers/net/wireless/quantenna/qtnfmac/debug.c
index 598ece753a4b..2d3574c1f10e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/debug.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/debug.c
@@ -5,7 +5,9 @@
void qtnf_debugfs_init(struct qtnf_bus *bus, const char *name)
{
- bus->dbg_dir = debugfs_create_dir(name, NULL);
+ struct dentry *parent = qtnf_get_debugfs_dir();
+
+ bus->dbg_dir = debugfs_create_dir(name, parent);
}
void qtnf_debugfs_remove(struct qtnf_bus *bus)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 6c1b886339ac..b57c8c18a8d0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -493,14 +493,20 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
for (i = 0; i < QTNF_MAX_INTF; i++) {
vif = &mac->iflist[i];
+
if (vif->wdev.iftype == NL80211_IFTYPE_UNSPECIFIED)
continue;
- if (vif->netdev) {
- mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->netdev, &chandef);
- mutex_unlock(&vif->wdev.mtx);
- }
+ if (vif->wdev.iftype == NL80211_IFTYPE_STATION &&
+ !vif->wdev.current_bss)
+ continue;
+
+ if (!vif->netdev)
+ continue;
+
+ mutex_lock(&vif->wdev.mtx);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef);
+ mutex_unlock(&vif->wdev.mtx);
}
return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index c3a32effa6f0..e4e9344b6982 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -56,7 +56,7 @@ int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
if (ret == -ETIMEDOUT) {
pr_err("EP firmware is dead\n");
- bus->fw_state = QTNF_FW_STATE_EP_DEAD;
+ bus->fw_state = QTNF_FW_STATE_DEAD;
}
return ret;
@@ -128,32 +128,22 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data)
return 0;
}
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success)
+int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus)
{
- struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus);
- struct pci_dev *pdev = priv->pdev;
int ret;
- if (boot_success) {
- bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE;
-
- ret = qtnf_core_attach(bus);
- if (ret) {
- pr_err("failed to attach core\n");
- boot_success = false;
- }
- }
-
- if (boot_success) {
+ bus->fw_state = QTNF_FW_STATE_BOOT_DONE;
+ ret = qtnf_core_attach(bus);
+ if (ret) {
+ pr_err("failed to attach core\n");
+ } else {
qtnf_debugfs_init(bus, DRV_NAME);
qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show);
qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show);
qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats);
- } else {
- bus->fw_state = QTNF_FW_STATE_DETACHED;
}
- put_device(&pdev->dev);
+ return ret;
}
static void qtnf_tune_pcie_mps(struct pci_dev *pdev)
@@ -344,7 +334,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv = get_bus_priv(bus);
pci_set_drvdata(pdev, bus);
bus->dev = &pdev->dev;
- bus->fw_state = QTNF_FW_STATE_RESET;
+ bus->fw_state = QTNF_FW_STATE_DETACHED;
pcie_priv->pdev = pdev;
pcie_priv->tx_stopped = 0;
pcie_priv->rx_bd_num = rx_bd_size_param;
@@ -364,6 +354,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pcie_priv->pcie_irq_count = 0;
pcie_priv->tx_reclaim_done = 0;
pcie_priv->tx_reclaim_req = 0;
+ pcie_priv->tx_eapol = 0;
pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
if (!pcie_priv->workqueue) {
@@ -419,8 +410,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev)
cancel_work_sync(&bus->fw_work);
- if (bus->fw_state == QTNF_FW_STATE_ACTIVE ||
- bus->fw_state == QTNF_FW_STATE_EP_DEAD)
+ if (qtnf_fw_is_attached(bus))
qtnf_core_detach(bus);
netif_napi_del(&bus->mux_napi);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
index bbc074e1f34d..5e8b9cb68419 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h
@@ -62,6 +62,7 @@ struct qtnf_pcie_bus_priv {
u32 tx_done_count;
u32 tx_reclaim_done;
u32 tx_reclaim_req;
+ u32 tx_eapol;
u8 msi_enabled;
u8 tx_stopped;
@@ -70,7 +71,7 @@ struct qtnf_pcie_bus_priv {
int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb);
int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv);
-void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success);
+int qtnf_pcie_fw_boot_done(struct qtnf_bus *bus);
void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv,
struct qtnf_shm_ipc_region __iomem *ipc_tx_reg,
struct qtnf_shm_ipc_region __iomem *ipc_rx_reg,
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 1f5facbb8905..3aa3714d4dfd 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -980,12 +980,11 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
{
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
struct qtnf_pcie_pearl_state *ps = (void *)get_bus_priv(bus);
+ u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
+ const char *fwname = QTN_PCI_PEARL_FW_NAME;
struct pci_dev *pdev = ps->base.pdev;
const struct firmware *fw;
int ret;
- u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK;
- const char *fwname = QTN_PCI_PEARL_FW_NAME;
- bool fw_boot_success = false;
if (ps->base.flashboot) {
state |= QTN_RC_FW_FLASHBOOT;
@@ -1031,23 +1030,23 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work)
goto fw_load_exit;
}
- pr_info("firmware is up and running\n");
-
if (qtnf_poll_state(&ps->bda->bda_ep_state,
QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) {
pr_err("firmware runtime failure\n");
goto fw_load_exit;
}
- fw_boot_success = true;
+ pr_info("firmware is up and running\n");
-fw_load_exit:
- qtnf_pcie_fw_boot_done(bus, fw_boot_success);
+ ret = qtnf_pcie_fw_boot_done(bus);
+ if (ret)
+ goto fw_load_exit;
- if (fw_boot_success) {
- qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
- qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- }
+ qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
+
+fw_load_exit:
+ put_device(&pdev->dev);
}
static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index cbcda57105f3..9a4380ed7f1b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -498,6 +498,13 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
int len;
int i;
+ if (unlikely(skb->protocol == htons(ETH_P_PAE))) {
+ qtnf_packet_send_hi_pri(skb);
+ qtnf_update_tx_stats(skb->dev, skb);
+ priv->tx_eapol++;
+ return NETDEV_TX_OK;
+ }
+
spin_lock_irqsave(&priv->tx_lock, flags);
if (!qtnf_tx_queue_ready(ts)) {
@@ -761,6 +768,7 @@ static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data)
seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count);
seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done);
seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req);
+ seq_printf(s, "tx_eapol(%u)\n", priv->tx_eapol);
seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index);
seq_printf(s, "tx_done_index(%u)\n", tx_done_index);
@@ -1023,8 +1031,9 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work)
{
struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work);
struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus);
- int ret;
int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT;
+ struct pci_dev *pdev = ts->base.pdev;
+ int ret;
qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT);
@@ -1073,19 +1082,23 @@ static void qtnf_topaz_fw_work_handler(struct work_struct *work)
}
}
+ ret = qtnf_post_init_ep(ts);
+ if (ret) {
+ pr_err("FW runtime failure\n");
+ goto fw_load_exit;
+ }
+
pr_info("firmware is up and running\n");
- ret = qtnf_post_init_ep(ts);
+ ret = qtnf_pcie_fw_boot_done(bus);
if (ret)
- pr_err("FW runtime failure\n");
+ goto fw_load_exit;
-fw_load_exit:
- qtnf_pcie_fw_boot_done(bus, ret ? false : true);
+ qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
+ qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- if (ret == 0) {
- qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats);
- qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats);
- }
+fw_load_exit:
+ put_device(&pdev->dev);
}
static void qtnf_reclaim_tasklet_fn(unsigned long data)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink.h b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
index 7798edcf7980..8a3c6344fa8e 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink.h
@@ -6,7 +6,7 @@
#include <linux/ieee80211.h>
-#define QLINK_PROTO_VER 13
+#define QLINK_PROTO_VER 15
#define QLINK_MACID_RSVD 0xFF
#define QLINK_VIFID_RSVD 0xFF
@@ -206,6 +206,8 @@ struct qlink_sta_info_state {
* execution status (one of &enum qlink_cmd_result). Reply message
* may also contain data payload specific to the command type.
*
+ * @QLINK_CMD_SEND_FRAME: send the specified frame over the air; the firmware
+ * will encapsulate an 802.3 packet into an 802.11 frame automatically.
* @QLINK_CMD_BAND_INFO_GET: for the specified MAC and specified band, get
* the band's description including number of operational channels and
* info on each channel, HT/VHT capabilities, supported rates etc.
@@ -220,7 +222,7 @@ enum qlink_cmd_type {
QLINK_CMD_FW_INIT = 0x0001,
QLINK_CMD_FW_DEINIT = 0x0002,
QLINK_CMD_REGISTER_MGMT = 0x0003,
- QLINK_CMD_SEND_MGMT_FRAME = 0x0004,
+ QLINK_CMD_SEND_FRAME = 0x0004,
QLINK_CMD_MGMT_SET_APPIE = 0x0005,
QLINK_CMD_PHY_PARAMS_GET = 0x0011,
QLINK_CMD_PHY_PARAMS_SET = 0x0012,
@@ -321,22 +323,26 @@ struct qlink_cmd_mgmt_frame_register {
u8 do_register;
} __packed;
-enum qlink_mgmt_frame_tx_flags {
- QLINK_MGMT_FRAME_TX_FLAG_NONE = 0,
- QLINK_MGMT_FRAME_TX_FLAG_OFFCHAN = BIT(0),
- QLINK_MGMT_FRAME_TX_FLAG_NO_CCK = BIT(1),
- QLINK_MGMT_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
+/**
+ * @QLINK_FRAME_TX_FLAG_8023: frame has an 802.3 header; if not set, the
+ * frame is 802.11 encapsulated.
+ */
+enum qlink_frame_tx_flags {
+ QLINK_FRAME_TX_FLAG_OFFCHAN = BIT(0),
+ QLINK_FRAME_TX_FLAG_NO_CCK = BIT(1),
+ QLINK_FRAME_TX_FLAG_ACK_NOWAIT = BIT(2),
+ QLINK_FRAME_TX_FLAG_8023 = BIT(3),
};
/**
- * struct qlink_cmd_mgmt_frame_tx - data for QLINK_CMD_SEND_MGMT_FRAME command
+ * struct qlink_cmd_frame_tx - data for QLINK_CMD_SEND_FRAME command
*
* @cookie: opaque request identifier.
* @freq: Frequency to use for frame transmission.
- * @flags: Transmission flags, one of &enum qlink_mgmt_frame_tx_flags.
+ * @flags: Transmission flags, one of &enum qlink_frame_tx_flags.
* @frame_data: frame to transmit.
*/
-struct qlink_cmd_mgmt_frame_tx {
+struct qlink_cmd_frame_tx {
struct qlink_cmd chdr;
__le32 cookie;
__le16 freq;
@@ -580,12 +586,22 @@ enum qlink_user_reg_hint_type {
* @initiator: which entity sent the request, one of &enum qlink_reg_initiator.
* @user_reg_hint_type: type of hint for QLINK_REGDOM_SET_BY_USER request, one
* of &enum qlink_user_reg_hint_type.
+ * @num_channels: number of &struct qlink_tlv_channel in the variable portion
+ * of the payload.
+ * @slave_radar: whether the slave device should enable radar detection.
+ * @dfs_region: one of &enum qlink_dfs_regions.
+ * @info: variable portion of regulatory notifier callback.
*/
struct qlink_cmd_reg_notify {
struct qlink_cmd chdr;
u8 alpha2[2];
u8 initiator;
u8 user_reg_hint_type;
+ u8 num_channels;
+ u8 dfs_region;
+ u8 slave_radar;
+ u8 rsvd[1];
+ u8 info[0];
} __packed;
/**
@@ -765,6 +781,18 @@ struct qlink_resp {
} __packed;
/**
+ * enum qlink_dfs_regions - regulatory DFS regions
+ *
+ * Corresponds to &enum nl80211_dfs_regions.
+ */
+enum qlink_dfs_regions {
+ QLINK_DFS_UNSET = 0,
+ QLINK_DFS_FCC = 1,
+ QLINK_DFS_ETSI = 2,
+ QLINK_DFS_JP = 3,
+};
+
+/**
* struct qlink_resp_get_mac_info - response for QLINK_CMD_MAC_INFO command
*
* Data describing specific physical device providing wireless MAC
@@ -779,6 +807,10 @@ struct qlink_resp {
* @bands_cap: wireless bands WMAC can operate in, bitmap of &enum qlink_band.
* @max_ap_assoc_sta: Maximum number of associations supported by WMAC.
* @radar_detect_widths: bitmask of channels BW for which WMAC can detect radar.
+ * @alpha2: country code ID firmware is configured to.
+ * @n_reg_rules: number of regulatory rule TLVs in the variable portion of the
+ * message.
+ * @dfs_region: regulatory DFS region, one of &enum qlink_dfs_regions.
* @var_info: variable-length WMAC info data.
*/
struct qlink_resp_get_mac_info {
@@ -792,23 +824,14 @@ struct qlink_resp_get_mac_info {
__le16 radar_detect_widths;
__le32 max_acl_mac_addrs;
u8 bands_cap;
+ u8 alpha2[2];
+ u8 n_reg_rules;
+ u8 dfs_region;
u8 rsvd[1];
u8 var_info[0];
} __packed;
/**
- * enum qlink_dfs_regions - regulatory DFS regions
- *
- * Corresponds to &enum nl80211_dfs_regions.
- */
-enum qlink_dfs_regions {
- QLINK_DFS_UNSET = 0,
- QLINK_DFS_FCC = 1,
- QLINK_DFS_ETSI = 2,
- QLINK_DFS_JP = 3,
-};
-
-/**
* struct qlink_resp_get_hw_info - response for QLINK_CMD_GET_HW_INFO command
*
* Description of wireless hardware capabilities and features.
@@ -820,11 +843,7 @@ enum qlink_dfs_regions {
* @mac_bitmap: Bitmap of MAC IDs that are active and can be used in firmware.
* @total_tx_chains: total number of transmit chains used by device.
* @total_rx_chains: total number of receive chains.
- * @alpha2: country code ID firmware is configured to.
- * @n_reg_rules: number of regulatory rules TLVs in variable portion of the
- * message.
- * @dfs_region: regulatory DFS region, one of @enum qlink_dfs_region.
- * @info: variable-length HW info, can contain QTN_TLV_ID_REG_RULE.
+ * @info: variable-length HW info.
*/
struct qlink_resp_get_hw_info {
struct qlink_resp rhdr;
@@ -838,9 +857,6 @@ struct qlink_resp_get_hw_info {
u8 mac_bitmap;
u8 total_tx_chain;
u8 total_rx_chain;
- u8 alpha2[2];
- u8 n_reg_rules;
- u8 dfs_region;
u8 info[0];
} __packed;
@@ -1148,6 +1164,13 @@ struct qlink_event_external_auth {
* carried by QTN_TLV_ID_STA_STATS_MAP.
* @QTN_TLV_ID_MAX_SCAN_SSIDS: maximum number of SSIDs the device can scan
* for in any given scan.
+ * @QTN_TLV_ID_SCAN_DWELL_ACTIVE: time spent on a single channel for an active
+ * scan.
+ * @QTN_TLV_ID_SCAN_DWELL_PASSIVE: time spent on a single channel for a passive
+ * scan.
+ * @QTN_TLV_ID_SCAN_SAMPLE_DURATION: total duration of sampling a single channel
+ * during a scan including off-channel dwell time and operating channel
+ * time.
*/
enum qlink_tlv_id {
QTN_TLV_ID_FRAG_THRESH = 0x0201,
@@ -1180,7 +1203,9 @@ enum qlink_tlv_id {
QTN_TLV_ID_WOWLAN_CAPAB = 0x0410,
QTN_TLV_ID_WOWLAN_PATTERN = 0x0411,
QTN_TLV_ID_SCAN_FLUSH = 0x0412,
- QTN_TLV_ID_SCAN_DWELL = 0x0413,
+ QTN_TLV_ID_SCAN_DWELL_ACTIVE = 0x0413,
+ QTN_TLV_ID_SCAN_DWELL_PASSIVE = 0x0416,
+ QTN_TLV_ID_SCAN_SAMPLE_DURATION = 0x0417,
};
struct qlink_tlv_hdr {
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
index 72bfd17cb687..1a972bce7b8b 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.c
@@ -182,3 +182,120 @@ void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
memcpy(qacl->mac_addrs, acl->mac_addrs,
acl->n_acl_entries * sizeof(*qacl->mac_addrs));
}
+
+enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return QLINK_BAND_2GHZ;
+ case NL80211_BAND_5GHZ:
+ return QLINK_BAND_5GHZ;
+ case NL80211_BAND_60GHZ:
+ return QLINK_BAND_60GHZ;
+ default:
+ return -EINVAL;
+ }
+}
+
+enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state)
+{
+ switch (state) {
+ case NL80211_DFS_USABLE:
+ return QLINK_DFS_USABLE;
+ case NL80211_DFS_AVAILABLE:
+ return QLINK_DFS_AVAILABLE;
+ case NL80211_DFS_UNAVAILABLE:
+ default:
+ return QLINK_DFS_UNAVAILABLE;
+ }
+}
+
+u32 qlink_utils_chflags_cfg2q(u32 cfgflags)
+{
+ u32 flags = 0;
+
+ if (cfgflags & IEEE80211_CHAN_DISABLED)
+ flags |= QLINK_CHAN_DISABLED;
+
+ if (cfgflags & IEEE80211_CHAN_NO_IR)
+ flags |= QLINK_CHAN_NO_IR;
+
+ if (cfgflags & IEEE80211_CHAN_RADAR)
+ flags |= QLINK_CHAN_RADAR;
+
+ if (cfgflags & IEEE80211_CHAN_NO_HT40PLUS)
+ flags |= QLINK_CHAN_NO_HT40PLUS;
+
+ if (cfgflags & IEEE80211_CHAN_NO_HT40MINUS)
+ flags |= QLINK_CHAN_NO_HT40MINUS;
+
+ if (cfgflags & IEEE80211_CHAN_NO_80MHZ)
+ flags |= QLINK_CHAN_NO_80MHZ;
+
+ if (cfgflags & IEEE80211_CHAN_NO_160MHZ)
+ flags |= QLINK_CHAN_NO_160MHZ;
+
+ return flags;
+}
+
+static u32 qtnf_reg_rule_flags_parse(u32 qflags)
+{
+ u32 flags = 0;
+
+ if (qflags & QLINK_RRF_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (qflags & QLINK_RRF_NO_CCK)
+ flags |= NL80211_RRF_NO_CCK;
+
+ if (qflags & QLINK_RRF_NO_INDOOR)
+ flags |= NL80211_RRF_NO_INDOOR;
+
+ if (qflags & QLINK_RRF_NO_OUTDOOR)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (qflags & QLINK_RRF_DFS)
+ flags |= NL80211_RRF_DFS;
+
+ if (qflags & QLINK_RRF_PTP_ONLY)
+ flags |= NL80211_RRF_PTP_ONLY;
+
+ if (qflags & QLINK_RRF_PTMP_ONLY)
+ flags |= NL80211_RRF_PTMP_ONLY;
+
+ if (qflags & QLINK_RRF_NO_IR)
+ flags |= NL80211_RRF_NO_IR;
+
+ if (qflags & QLINK_RRF_AUTO_BW)
+ flags |= NL80211_RRF_AUTO_BW;
+
+ if (qflags & QLINK_RRF_IR_CONCURRENT)
+ flags |= NL80211_RRF_IR_CONCURRENT;
+
+ if (qflags & QLINK_RRF_NO_HT40MINUS)
+ flags |= NL80211_RRF_NO_HT40MINUS;
+
+ if (qflags & QLINK_RRF_NO_HT40PLUS)
+ flags |= NL80211_RRF_NO_HT40PLUS;
+
+ if (qflags & QLINK_RRF_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (qflags & QLINK_RRF_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
+ const struct qlink_tlv_reg_rule *tlv)
+{
+ rule->freq_range.start_freq_khz = le32_to_cpu(tlv->start_freq_khz);
+ rule->freq_range.end_freq_khz = le32_to_cpu(tlv->end_freq_khz);
+ rule->freq_range.max_bandwidth_khz =
+ le32_to_cpu(tlv->max_bandwidth_khz);
+ rule->power_rule.max_antenna_gain = le32_to_cpu(tlv->max_antenna_gain);
+ rule->power_rule.max_eirp = le32_to_cpu(tlv->max_eirp);
+ rule->dfs_cac_ms = le32_to_cpu(tlv->dfs_cac_ms);
+ rule->flags = qtnf_reg_rule_flags_parse(le32_to_cpu(tlv->flags));
+}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
index 781ea7fe79f2..f873beed2ae7 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/qlink_util.h
@@ -79,5 +79,10 @@ bool qtnf_utils_is_bit_set(const u8 *arr, unsigned int bit,
unsigned int arr_max_len);
void qlink_acl_data_cfg2q(const struct cfg80211_acl_data *acl,
struct qlink_acl_data *qacl);
+enum qlink_band qlink_utils_band_cfg2q(enum nl80211_band band);
+enum qlink_dfs_state qlink_utils_dfs_state_cfg2q(enum nl80211_dfs_state state);
+u32 qlink_utils_chflags_cfg2q(u32 cfgflags);
+void qlink_utils_regrule_q2nl(struct ieee80211_reg_rule *rule,
+ const struct qlink_tlv_reg_rule *tlv_rule);
#endif /* _QTN_FMAC_QLINK_UTIL_H_ */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800.h b/drivers/net/wireless/ralink/rt2x00/rt2800.h
index b05ed2f3025a..06c38bafd2ca 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800.h
@@ -48,7 +48,8 @@
* RF2853 2.4G/5G 3T3R
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
- * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF3053 2.4G/5G 3T3R(RT3563/RT3573/RT3593)
+ * RF3853 2.4G/5G 3T3R(RT3883/RT3662)
* RF5592 2.4G/5G 2T2R
* RF3070 2.4G 1T1R
* RF5360 2.4G 1T1R
@@ -72,6 +73,7 @@
#define RF5592 0x000f
#define RF3070 0x3070
#define RF3290 0x3290
+#define RF3853 0x3853
#define RF5350 0x5350
#define RF5360 0x5360
#define RF5362 0x5362
@@ -1726,6 +1728,20 @@
#define TX_PWR_CFG_9B_STBC_MCS7 FIELD32(0x000000ff)
/*
+ * TX_TXBF_CFG:
+ */
+#define TX_TXBF_CFG_0 0x138c
+#define TX_TXBF_CFG_1 0x13a4
+#define TX_TXBF_CFG_2 0x13a8
+#define TX_TXBF_CFG_3 0x13ac
+
+/*
+ * TX_FBK_CFG_3S:
+ */
+#define TX_FBK_CFG_3S_0 0x13c4
+#define TX_FBK_CFG_3S_1 0x13c8
+
+/*
* RX_FILTER_CFG: RX configuration register.
*/
#define RX_FILTER_CFG 0x1400
@@ -2296,6 +2312,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 2:
*/
+#define RFCSR2_RESCAL_BP FIELD8(0x40)
#define RFCSR2_RESCAL_EN FIELD8(0x80)
#define RFCSR2_RX2_EN_MT7620 FIELD8(0x02)
#define RFCSR2_TX2_EN_MT7620 FIELD8(0x20)
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a03b5284a050..c8f2bf1243fd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -381,7 +381,8 @@ static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
wiphy_name(rt2x00dev->hw->wiphy), word))
return 0;
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
map = rt2800_eeprom_map_ext;
else
map = rt2800_eeprom_map;
@@ -590,6 +591,7 @@ void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
{
switch (rt2x00dev->chip.rt) {
case RT3593:
+ case RT3883:
*txwi_size = TXWI_DESC_SIZE_4WORDS;
*rxwi_size = RXWI_DESC_SIZE_5WORDS;
break;
@@ -1100,7 +1102,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev, unsigned int quota)
{
struct data_queue *queue;
struct queue_entry *entry;
@@ -1108,7 +1110,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
u8 qid;
bool match;
- while (kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
+ while (quota-- > 0 && kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) {
/*
* TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is
 * guaranteed to be one of the TX QIDs.
@@ -1164,15 +1166,6 @@ bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
struct data_queue *queue;
struct queue_entry *entry;
- if (!test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) {
- unsigned long tout = msecs_to_jiffies(1000);
-
- if (time_before(jiffies, rt2x00dev->last_nostatus_check + tout))
- return false;
- }
-
- rt2x00dev->last_nostatus_check = jiffies;
-
tx_queue_for_each(rt2x00dev, queue) {
entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
if (rt2800_entry_txstatus_timeout(rt2x00dev, entry))
@@ -1183,6 +1176,23 @@ bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_txstatus_timeout);
+/*
+ * test if there is an entry in any TX queue for which DMA is done
+ * but the TX status has not been returned yet
+ */
+bool rt2800_txstatus_pending(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ tx_queue_for_each(rt2x00dev, queue) {
+ if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
+ rt2x00queue_get_entry(queue, Q_INDEX_DONE))
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(rt2800_txstatus_pending);
+
void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev)
{
struct data_queue *queue;
@@ -2172,7 +2182,8 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_write(rt2x00dev, 3, r3);
rt2800_bbp_write(rt2x00dev, 1, r1);
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
if (ant->rx_chain_num == 1)
rt2800_bbp_write(rt2x00dev, 86, 0x00);
else
@@ -2194,7 +2205,8 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
} else if (libconf->rf.channel <= 128) {
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_EXT_LNA2_A1);
@@ -2204,7 +2216,8 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
EEPROM_RSSI_BG2_LNA_A1);
}
} else {
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
lna_gain = rt2x00_get_field16(eeprom,
EEPROM_EXT_LNA2_A2);
@@ -2872,6 +2885,211 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
}
}
+static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+ u8 bbp;
+ u8 pwr1, pwr2, pwr3;
+
+ const bool txbf_enabled = false; /* TODO */
+
+ /* TODO: add band selection */
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+ else if (rf->channel < 132)
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x46);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x48);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1a);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x52);
+
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ break;
+ }
+
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ break;
+ }
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_freq_cal_mode1(rt2x00dev);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 30);
+ if (!conf_is_ht40(conf))
+ rfcsr &= ~(0x06);
+ else
+ rfcsr |= 0x06;
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 31, 0xa0);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+
+ if (conf_is_ht40(conf))
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 32, 0xd8);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x20);
+
+ /* loopback RF_BS */
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 36);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
+ rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
+
+ if (rf->channel <= 14)
+ rfcsr = 0x23;
+ else if (rf->channel < 100)
+ rfcsr = 0x36;
+ else if (rf->channel < 132)
+ rfcsr = 0x32;
+ else
+ rfcsr = 0x30;
+
+ if (txbf_enabled)
+ rfcsr |= 0x40;
+
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
+
+ if (rf->channel <= 14)
+ rfcsr = 0xbb;
+ else if (rf->channel < 100)
+ rfcsr = 0xeb;
+ else if (rf->channel < 132)
+ rfcsr = 0xb3;
+ else
+ rfcsr = 0x9b;
+ rt2800_rfcsr_write(rt2x00dev, 45, rfcsr);
+
+ if (rf->channel <= 14)
+ rfcsr = 0x8e;
+ else
+ rfcsr = 0x8a;
+
+ if (txbf_enabled)
+ rfcsr |= 0x20;
+
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 51);
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x51);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 52);
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+
+ if (rf->channel <= 14) {
+ pwr1 = info->default_power1 & 0x1f;
+ pwr2 = info->default_power2 & 0x1f;
+ pwr3 = info->default_power3 & 0x1f;
+ } else {
+ pwr1 = 0x48 | ((info->default_power1 & 0x18) << 1) |
+ (info->default_power1 & 0x7);
+ pwr2 = 0x48 | ((info->default_power2 & 0x18) << 1) |
+ (info->default_power2 & 0x7);
+ pwr3 = 0x48 | ((info->default_power3 & 0x18) << 1) |
+ (info->default_power3 & 0x7);
+ }
+
+ rt2800_rfcsr_write(rt2x00dev, 53, pwr1);
+ rt2800_rfcsr_write(rt2x00dev, 54, pwr2);
+ rt2800_rfcsr_write(rt2x00dev, 55, pwr3);
+
+ rt2x00_dbg(rt2x00dev, "Channel:%d, pwr1:%02x, pwr2:%02x, pwr3:%02x\n",
+ rf->channel, pwr1, pwr2, pwr3);
+
+ bbp = (info->default_power1 >> 5) |
+ ((info->default_power2 & 0xe0) >> 1);
+ rt2800_bbp_write(rt2x00dev, 109, bbp);
+
+ bbp = rt2800_bbp_read(rt2x00dev, 110);
+ bbp &= 0x0f;
+ bbp |= (info->default_power3 & 0xe0) >> 1;
+ rt2800_bbp_write(rt2x00dev, 110, bbp);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 57);
+ if (rf->channel <= 14)
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);
+ else
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x3e);
+
+ /* Enable RF tuning */
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3);
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+
+ udelay(2000);
+
+ bbp = rt2800_bbp_read(rt2x00dev, 49);
+ /* clear update flag */
+ rt2800_bbp_write(rt2x00dev, 49, bbp & 0xfe);
+ rt2800_bbp_write(rt2x00dev, 49, bbp);
+
+ /* TODO: add calibration for TxBF */
+}
+
#define POWER_BOUND 0x27
#define POWER_BOUND_5G 0x2b
@@ -3675,19 +3893,51 @@ static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
unsigned int channel,
char txpower)
{
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
if (channel <= 14)
return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
MAX_A_TXPOWER_3593);
else
return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
}
+static void rt3883_bbp_adjust(struct rt2x00_dev *rt2x00dev,
+ struct rf_channel *rf)
+{
+ u8 bbp;
+
+ bbp = (rf->channel > 14) ? 0x48 : 0x38;
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp);
+
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
+
+ if (rf->channel <= 14) {
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+ } else {
+ /* Disable CCK packet detection */
+ rt2800_bbp_write(rt2x00dev, 70, 0x00);
+ }
+
+ rt2800_bbp_write(rt2x00dev, 73, 0x10);
+
+ if (rf->channel > 14) {
+ rt2800_bbp_write(rt2x00dev, 62, 0x1d);
+ rt2800_bbp_write(rt2x00dev, 63, 0x1d);
+ rt2800_bbp_write(rt2x00dev, 64, 0x1d);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 62, 0x2d);
+ rt2800_bbp_write(rt2x00dev, 63, 0x2d);
+ rt2800_bbp_write(rt2x00dev, 64, 0x2d);
+ }
+}
+
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -3706,6 +3956,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_txpower_to_dev(rt2x00dev, rf->channel,
info->default_power3);
+ switch (rt2x00dev->chip.rt) {
+ case RT3883:
+ rt3883_bbp_adjust(rt2x00dev, rf);
+ break;
+ }
+
switch (rt2x00dev->chip.rf) {
case RF2020:
case RF3020:
@@ -3726,6 +3982,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3322:
rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info);
break;
+ case RF3853:
+ rt2800_config_channel_rf3853(rt2x00dev, conf, rf, info);
+ break;
case RF3070:
case RF5350:
case RF5360:
@@ -3807,6 +4066,15 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 77, 0x98);
+ } else if (rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+
+ if (rt2x00dev->default_ant.rx_chain_num > 1)
+ rt2800_bbp_write(rt2x00dev, 86, 0x46);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0);
} else {
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -3820,6 +4088,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
!rt2x00_rt(rt2x00dev, RT6352)) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) {
rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
if (rt2x00_rt(rt2x00dev, RT3593))
@@ -3828,19 +4097,22 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 82, 0x84);
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_bbp_write(rt2x00dev, 83, 0x8a);
}
} else {
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_bbp_write(rt2x00dev, 82, 0x94);
- else if (rt2x00_rt(rt2x00dev, RT3593))
+ else if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_bbp_write(rt2x00dev, 82, 0x82);
else if (!rt2x00_rt(rt2x00dev, RT6352))
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_bbp_write(rt2x00dev, 83, 0x9a);
if (rt2x00_has_cap_external_lna_a(rt2x00dev))
@@ -3976,6 +4248,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
usleep_range(1000, 1500);
}
+ if (rt2x00_rt(rt2x00dev, RT3883)) {
+ if (!conf_is_ht40(conf))
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+ else
+ rt2800_bbp_write(rt2x00dev, 105, 0x04);
+
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x2e + rt2x00dev->lna_gain;
+ else
+ reg = 0x20 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+
+ usleep_range(1000, 1500);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) {
reg = 0x10;
if (!conf_is_ht40(conf)) {
@@ -4235,6 +4524,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
if (rt2x00_rt(rt2x00dev, RT3593))
return min_t(u8, txpower, 0xc);
+ if (rt2x00_rt(rt2x00dev, RT3883))
+ return min_t(u8, txpower, 0xf);
+
if (rt2x00_has_cap_power_limit(rt2x00dev)) {
/*
* Check if eirp txpower exceed txpower_limit.
@@ -4996,7 +5288,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
struct ieee80211_channel *chan,
int power_level)
{
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
else if (rt2x00_rt(rt2x00dev, RT6352))
rt2800_config_txpower_rt6352(rt2x00dev, chan, power_level);
@@ -5043,6 +5336,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
case RF3053:
case RF3070:
case RF3290:
+ case RF3853:
case RF5350:
case RF5360:
case RF5362:
@@ -5243,7 +5537,8 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
else
vgc = 0x2e + rt2x00dev->lna_gain;
} else { /* 5GHZ band */
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
else if (rt2x00_rt(rt2x00dev, RT5592))
vgc = 0x24 + (2 * rt2x00dev->lna_gain);
@@ -5263,7 +5558,8 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
{
if (qual->vgc_level != vgc_level) {
if (rt2x00_rt(rt2x00dev, RT3572) ||
- rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
vgc_level);
} else if (rt2x00_rt(rt2x00dev, RT5592)) {
@@ -5310,6 +5606,11 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
}
break;
+ case RT3883:
+ if (qual->rssi > -65)
+ vgc += 0x10;
+ break;
+
case RT5592:
if (qual->rssi > -65)
vgc += 0x20;
@@ -5462,6 +5763,12 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x00000000);
}
+ } else if (rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00040000);
+ rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21);
+ rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40);
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT6352)) {
@@ -5675,6 +5982,11 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
+ if (rt2x00_rt(rt2x00dev, RT3883)) {
+ rt2800_register_write(rt2x00dev, TX_FBK_CFG_3S_0, 0x12111008);
+ rt2800_register_write(rt2x00dev, TX_FBK_CFG_3S_1, 0x16151413);
+ }
+
reg = rt2800_register_read(rt2x00dev, TX_RTS_CFG);
rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7);
rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
@@ -6291,6 +6603,47 @@ static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0xc0);
}
+static void rt2800_init_bbp_3883(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_init_bbp_early(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 4, 0x50);
+ rt2800_bbp_write(rt2x00dev, 47, 0x48);
+
+ rt2800_bbp_write(rt2x00dev, 86, 0x46);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+ rt2800_bbp_write(rt2x00dev, 106, 0x12);
+ rt2800_bbp_write(rt2x00dev, 120, 0x50);
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+ rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+ /* Set ITxBF timeout to 0x9C40=1000msec */
+ rt2800_bbp_write(rt2x00dev, 179, 0x02);
+ rt2800_bbp_write(rt2x00dev, 180, 0x00);
+ rt2800_bbp_write(rt2x00dev, 182, 0x40);
+ rt2800_bbp_write(rt2x00dev, 180, 0x01);
+ rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+
+ rt2800_bbp_write(rt2x00dev, 179, 0x00);
+
+ /* Reprogram the inband interface to put the right values in the RXWI */
+ rt2800_bbp_write(rt2x00dev, 142, 0x04);
+ rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 142, 0x06);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+ rt2800_bbp_write(rt2x00dev, 142, 0x07);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+ rt2800_bbp_write(rt2x00dev, 142, 0x08);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+}
+
static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
{
int ant, div_mode;
@@ -6735,6 +7088,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
case RT3593:
rt2800_init_bbp_3593(rt2x00dev);
return;
+ case RT3883:
+ rt2800_init_bbp_3883(rt2x00dev);
+ return;
case RT5390:
case RT5392:
rt2800_init_bbp_53xx(rt2x00dev);
@@ -7606,6 +7962,144 @@ static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
}
+static void rt2800_init_rfcsr_3883(struct rt2x00_dev *rt2x00dev)
+{
+ u8 rfcsr;
+
+ /* TODO: get the actual ECO value from the SoC */
+ const unsigned int eco = 5;
+
+ rt2800_rf_init_calibration(rt2x00dev, 2);
+
+ rt2800_rfcsr_write(rt2x00dev, 0, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x50);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0x5b);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x48);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x1a);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
+
+ /* RFCSR 17 will be initialized later based on the
+ * frequency offset stored in the EEPROM
+ */
+
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 19, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 20, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 21, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 23, 0xc0);
+ rt2800_rfcsr_write(rt2x00dev, 24, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 37, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x51);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x76);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x76);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x76);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x3e);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 62, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 63, 0x00);
+
+ /* TODO: rx filter calibration? */
+
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+ rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+ rt2800_bbp_write(rt2x00dev, 105, 0x05);
+
+ rt2800_bbp_write(rt2x00dev, 179, 0x02);
+ rt2800_bbp_write(rt2x00dev, 180, 0x00);
+ rt2800_bbp_write(rt2x00dev, 182, 0x40);
+ rt2800_bbp_write(rt2x00dev, 180, 0x01);
+ rt2800_bbp_write(rt2x00dev, 182, 0x9c);
+
+ rt2800_bbp_write(rt2x00dev, 179, 0x00);
+
+ rt2800_bbp_write(rt2x00dev, 142, 0x04);
+ rt2800_bbp_write(rt2x00dev, 143, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 142, 0x06);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa0);
+ rt2800_bbp_write(rt2x00dev, 142, 0x07);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa1);
+ rt2800_bbp_write(rt2x00dev, 142, 0x08);
+ rt2800_bbp_write(rt2x00dev, 143, 0xa2);
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+
+ if (eco == 5) {
+ rt2800_rfcsr_write(rt2x00dev, 32, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x32);
+ }
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 2);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_BP, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+ msleep(1);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 6);
+ rfcsr |= 0xc0;
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 22);
+ rfcsr |= 0x20;
+ rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 46);
+ rfcsr |= 0x20;
+ rt2800_rfcsr_write(rt2x00dev, 46, rfcsr);
+
+ rfcsr = rt2800_rfcsr_read(rt2x00dev, 20);
+ rfcsr &= ~0xee;
+ rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
+}
+
static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
{
rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -8448,6 +8942,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT3390:
rt2800_init_rfcsr_3390(rt2x00dev);
break;
+ case RT3883:
+ rt2800_init_rfcsr_3883(rt2x00dev);
+ break;
case RT3572:
rt2800_init_rfcsr_3572(rt2x00dev);
break;
@@ -8653,7 +9150,8 @@ static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
{
u16 word;
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
return 0;
word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG);
@@ -8667,7 +9165,8 @@ static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
{
u16 word;
- if (rt2x00_rt(rt2x00dev, RT3593))
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883))
return 0;
word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A);
@@ -8773,7 +9272,8 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
- if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (!rt2x00_rt(rt2x00dev, RT3593) &&
+ !rt2x00_rt(rt2x00dev, RT3883)) {
if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
@@ -8793,7 +9293,8 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
- if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (!rt2x00_rt(rt2x00dev, RT3593) &&
+ !rt2x00_rt(rt2x00dev, RT3883)) {
if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
@@ -8801,7 +9302,8 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
}
rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
- if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_rt(rt2x00dev, RT3593) ||
+ rt2x00_rt(rt2x00dev, RT3883)) {
word = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2);
if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
@@ -8840,6 +9342,8 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rf = rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID);
else if (rt2x00_rt(rt2x00dev, RT3352))
rf = RF3322;
+ else if (rt2x00_rt(rt2x00dev, RT3883))
+ rf = RF3853;
else if (rt2x00_rt(rt2x00dev, RT5350))
rf = RF5350;
else
@@ -8860,6 +9364,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3290:
case RF3320:
case RF3322:
+ case RF3853:
case RF5350:
case RF5360:
case RF5362:
@@ -9146,6 +9651,66 @@ static const struct rf_channel rf_vals_3x_xtal20[] = {
{14, 0xF0, 2, 0x18},
};
+static const struct rf_channel rf_vals_3853[] = {
+ {1, 241, 6, 2},
+ {2, 241, 6, 7},
+ {3, 242, 6, 2},
+ {4, 242, 6, 7},
+ {5, 243, 6, 2},
+ {6, 243, 6, 7},
+ {7, 244, 6, 2},
+ {8, 244, 6, 7},
+ {9, 245, 6, 2},
+ {10, 245, 6, 7},
+ {11, 246, 6, 2},
+ {12, 246, 6, 7},
+ {13, 247, 6, 2},
+ {14, 248, 6, 4},
+
+ {36, 0x56, 8, 4},
+ {38, 0x56, 8, 6},
+ {40, 0x56, 8, 8},
+ {44, 0x57, 8, 0},
+ {46, 0x57, 8, 2},
+ {48, 0x57, 8, 4},
+ {52, 0x57, 8, 8},
+ {54, 0x57, 8, 10},
+ {56, 0x58, 8, 0},
+ {60, 0x58, 8, 4},
+ {62, 0x58, 8, 6},
+ {64, 0x58, 8, 8},
+
+ {100, 0x5b, 8, 8},
+ {102, 0x5b, 8, 10},
+ {104, 0x5c, 8, 0},
+ {108, 0x5c, 8, 4},
+ {110, 0x5c, 8, 6},
+ {112, 0x5c, 8, 8},
+ {114, 0x5c, 8, 10},
+ {116, 0x5d, 8, 0},
+ {118, 0x5d, 8, 2},
+ {120, 0x5d, 8, 4},
+ {124, 0x5d, 8, 8},
+ {126, 0x5d, 8, 10},
+ {128, 0x5e, 8, 0},
+ {132, 0x5e, 8, 4},
+ {134, 0x5e, 8, 6},
+ {136, 0x5e, 8, 8},
+ {140, 0x5f, 8, 0},
+
+ {149, 0x5f, 8, 9},
+ {151, 0x5f, 8, 11},
+ {153, 0x60, 8, 1},
+ {157, 0x60, 8, 5},
+ {159, 0x60, 8, 7},
+ {161, 0x60, 8, 9},
+ {165, 0x61, 8, 1},
+ {167, 0x61, 8, 3},
+ {169, 0x61, 8, 5},
+ {171, 0x61, 8, 7},
+ {173, 0x61, 8, 9},
+};
+
static const struct rf_channel rf_vals_5592_xtal20[] = {
/* Channel, N, K, mod, R */
{1, 482, 4, 10, 3},
@@ -9409,6 +9974,11 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_3x;
break;
+ case RF3853:
+ spec->num_channels = ARRAY_SIZE(rf_vals_3853);
+ spec->channels = rf_vals_3853;
+ break;
+
case RF5592:
reg = rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX);
if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
@@ -9528,6 +10098,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3053:
case RF3070:
case RF3290:
+ case RF3853:
case RF5350:
case RF5360:
case RF5362:
@@ -9570,6 +10141,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
case RT3390:
case RT3572:
case RT3593:
+ case RT3883:
case RT5350:
case RT5390:
case RT5392:
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
index 0dff2c7b3010..759eab2b70c3 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h
@@ -195,9 +195,10 @@ void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *tx
void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
bool match);
-void rt2800_txdone(struct rt2x00_dev *rt2x00dev);
+void rt2800_txdone(struct rt2x00_dev *rt2x00dev, unsigned int quota);
void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev);
bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev);
+bool rt2800_txstatus_pending(struct rt2x00_dev *rt2x00dev);
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc);
void rt2800_clear_beacon(struct queue_entry *entry);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index ddb88cfeace2..ecc4c9332ec7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -255,26 +255,12 @@ void rt2800mmio_autowake_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet);
-static void rt2800mmio_txdone(struct rt2x00_dev *rt2x00dev)
-{
- bool timeout = false;
-
- while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
- (timeout = rt2800_txstatus_timeout(rt2x00dev))) {
-
- rt2800_txdone(rt2x00dev);
-
- if (timeout)
- rt2800_txdone_nostatus(rt2x00dev);
- }
-}
-
-static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
+static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
{
u32 status;
- bool more = false;
+ unsigned long flags;
- /* FIXEME: rewrite this comment
+ /*
* The TX_FIFO_STATUS interrupt needs special care. We should
* read TX_STA_FIFO but we should do it immediately as otherwise
* the register can overflow and we would lose status reports.
@@ -285,34 +271,32 @@ static bool rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
* because we can schedule the tasklet multiple times (when the
* interrupt fires again during tx status processing).
*
- * txstatus tasklet is called with INT_SOURCE_CSR_TX_FIFO_STATUS
- * disabled so have only one producer and one consumer - we don't
- * need to lock the kfifo.
+ * We also read statuses from the tx status timeout timer, so use
+ * a lock to prevent concurrent writes to the fifo.
*/
+
+ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
+
while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) {
status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO);
if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
break;
kfifo_put(&rt2x00dev->txstatus_fifo, status);
- more = true;
}
- return more;
+ spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
void rt2800mmio_txstatus_tasklet(unsigned long data)
{
struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
- do {
- rt2800mmio_txdone(rt2x00dev);
+ rt2800_txdone(rt2x00dev, 16);
- } while (rt2800mmio_fetch_txstatus(rt2x00dev));
+ if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
- if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
- rt2800mmio_enable_interrupt(rt2x00dev,
- INT_SOURCE_CSR_TX_FIFO_STATUS);
}
EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet);
@@ -339,8 +323,10 @@ irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance)
mask = ~reg;
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) {
+ rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1);
rt2800mmio_fetch_txstatus(rt2x00dev);
- tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
}
if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT))
@@ -440,6 +426,9 @@ void rt2800mmio_start_queue(struct data_queue *queue)
}
EXPORT_SYMBOL_GPL(rt2800mmio_start_queue);
+/* 200 ms */
+#define TXSTATUS_TIMEOUT 200000000
+
void rt2800mmio_kick_queue(struct data_queue *queue)
{
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
@@ -454,6 +443,8 @@ void rt2800mmio_kick_queue(struct data_queue *queue)
entry = rt2x00queue_get_entry(queue, Q_INDEX);
rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid),
entry->entry_idx);
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
break;
case QID_MGMT:
entry = rt2x00queue_get_entry(queue, Q_INDEX);
@@ -498,11 +489,8 @@ void rt2800mmio_flush_queue(struct data_queue *queue, bool drop)
* For TX queues schedule completion tasklet to catch
 * tx status timeouts, otherwise just wait.
*/
- if (tx_queue) {
- tasklet_disable(&rt2x00dev->txstatus_tasklet);
- rt2800mmio_txdone(rt2x00dev);
- tasklet_enable(&rt2x00dev->txstatus_tasklet);
- }
+ if (tx_queue)
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
/*
* Wait for a little while to give the driver
@@ -640,6 +628,10 @@ void rt2800mmio_clear_entry(struct queue_entry *entry)
word = rt2x00_desc_read(entry_priv->desc, 1);
rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
rt2x00_desc_write(entry_priv->desc, 1, word);
+
+ /* If this is the last entry, stop the txstatus timer */
+ if (entry->queue->length == 1)
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
}
}
EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry);
@@ -772,6 +764,70 @@ int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio);
+static void rt2800mmio_work_txdone(struct work_struct *work)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(work, struct rt2x00_dev, txdone_work);
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ return;
+
+ while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
+ rt2800_txstatus_timeout(rt2x00dev)) {
+
+ tasklet_disable(&rt2x00dev->txstatus_tasklet);
+ rt2800_txdone(rt2x00dev, UINT_MAX);
+ rt2800_txdone_nostatus(rt2x00dev);
+ tasklet_enable(&rt2x00dev->txstatus_tasklet);
+ }
+
+ if (rt2800_txstatus_pending(rt2x00dev))
+ hrtimer_start(&rt2x00dev->txstatus_timer,
+ TXSTATUS_TIMEOUT, HRTIMER_MODE_REL);
+}
+
+static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer)
+{
+ struct rt2x00_dev *rt2x00dev =
+ container_of(timer, struct rt2x00_dev, txstatus_timer);
+
+ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
+ goto out;
+
+ if (!rt2800_txstatus_pending(rt2x00dev))
+ goto out;
+
+ rt2800mmio_fetch_txstatus(rt2x00dev);
+ if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo))
+ tasklet_schedule(&rt2x00dev->txstatus_tasklet);
+ else
+ queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
+out:
+ return HRTIMER_NORESTART;
+}
+
+int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev)
+{
+ int retval;
+
+ retval = rt2800_probe_hw(rt2x00dev);
+ if (retval)
+ return retval;
+
+ /*
+ * Set txstatus timer function.
+ */
+ rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout;
+
+ /*
+ * Overwrite TX done handler
+ */
+ INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw);
+
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2800 MMIO library");
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
index 3a513273f414..ca58e6c3a4e5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
@@ -153,6 +153,7 @@ void rt2800mmio_stop_queue(struct data_queue *queue);
void rt2800mmio_queue_init(struct data_queue *queue);
/* Initialization functions */
+int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev);
bool rt2800mmio_get_entry_state(struct queue_entry *entry);
void rt2800mmio_clear_entry(struct queue_entry *entry);
int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
index 0291441ac548..43e1b1ee96bf 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800pci.c
@@ -346,7 +346,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
.tbtt_tasklet = rt2800mmio_tbtt_tasklet,
.rxdone_tasklet = rt2800mmio_rxdone_tasklet,
.autowake_tasklet = rt2800mmio_autowake_tasklet,
- .probe_hw = rt2800_probe_hw,
+ .probe_hw = rt2800mmio_probe_hw,
.get_firmware_name = rt2800pci_get_firmware_name,
.check_firmware = rt2800_check_firmware,
.load_firmware = rt2800_load_firmware,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
index a502816214ab..4e9e38771a56 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800soc.c
@@ -51,9 +51,16 @@ static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev)
static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev)
{
+ u32 reg;
+
rt2800_disable_radio(rt2x00dev);
rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0);
- rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, 0);
+
+ reg = 0;
+ if (rt2x00_rt(rt2x00dev, RT3883))
+ rt2x00_set_field32(&reg, TX_PIN_CFG_RFTR_EN, 1);
+
+ rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, reg);
}
static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev,
@@ -185,7 +192,7 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.tbtt_tasklet = rt2800mmio_tbtt_tasklet,
.rxdone_tasklet = rt2800mmio_rxdone_tasklet,
.autowake_tasklet = rt2800mmio_autowake_tasklet,
- .probe_hw = rt2800_probe_hw,
+ .probe_hw = rt2800mmio_probe_hw,
.get_firmware_name = rt2800soc_get_firmware_name,
.check_firmware = rt2800soc_check_firmware,
.load_firmware = rt2800soc_load_firmware,
@@ -203,7 +210,7 @@ static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = {
.start_queue = rt2800mmio_start_queue,
.kick_queue = rt2800mmio_kick_queue,
.stop_queue = rt2800mmio_stop_queue,
- .flush_queue = rt2x00mmio_flush_queue,
+ .flush_queue = rt2800mmio_flush_queue,
.write_tx_desc = rt2800mmio_write_tx_desc,
.write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 19eabf16147b..b5f75df9b563 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -100,22 +100,6 @@ static void rt2800usb_stop_queue(struct data_queue *queue)
}
}
-/*
- * test if there is an entry in any TX queue for which DMA is done
- * but the TX status has not been returned yet
- */
-static bool rt2800usb_txstatus_pending(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- tx_queue_for_each(rt2x00dev, queue) {
- if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) !=
- rt2x00queue_get_entry(queue, Q_INDEX_DONE))
- return true;
- }
- return false;
-}
-
#define TXSTATUS_READ_INTERVAL 1000000
static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
@@ -145,7 +129,7 @@ static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev,
if (rt2800_txstatus_timeout(rt2x00dev))
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
- if (rt2800usb_txstatus_pending(rt2x00dev)) {
+ if (rt2800_txstatus_pending(rt2x00dev)) {
/* Read register after 1 ms */
hrtimer_start(&rt2x00dev->txstatus_timer,
TXSTATUS_READ_INTERVAL,
@@ -160,7 +144,7 @@ stop_reading:
* clear_bit someone could do rt2x00usb_interrupt_txdone, so recheck
* here again if status reading is needed.
*/
- if (rt2800usb_txstatus_pending(rt2x00dev) &&
+ if (rt2800_txstatus_pending(rt2x00dev) &&
!test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags))
return true;
else
@@ -480,7 +464,7 @@ static void rt2800usb_work_txdone(struct work_struct *work)
while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) ||
rt2800_txstatus_timeout(rt2x00dev)) {
- rt2800_txdone(rt2x00dev);
+ rt2800_txdone(rt2x00dev, UINT_MAX);
rt2800_txdone_nostatus(rt2x00dev);
@@ -489,7 +473,7 @@ static void rt2800usb_work_txdone(struct work_struct *work)
* if the medium is busy, thus the TX_STA_FIFO entry is
* also delayed -> use a timer to retrieve it.
*/
- if (rt2800usb_txstatus_pending(rt2x00dev))
+ if (rt2800_txstatus_pending(rt2x00dev))
rt2800usb_async_read_tx_status(rt2x00dev);
}
}
@@ -562,13 +546,13 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* stripped it from the frame. Signal this to mac80211.
*/
rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
-
+
if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) {
rxdesc->flags |= RX_FLAG_DECRYPTED;
} else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) {
/*
* In order to check the Michael Mic, the packet must have
- * been decrypted. Mac80211 doesnt check the MMIC failure
+ * been decrypted. Mac80211 doesn't check the MMIC failure
* flag to initiate MMIC countermeasures if the decoded flag
* has not been set.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 50b92ca92bd7..9c6ef0ca932b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -69,10 +69,10 @@
printk(KERN_ERR KBUILD_MODNAME ": %s: Error - " fmt, \
__func__, ##__VA_ARGS__)
#define rt2x00_err(dev, fmt, ...) \
- wiphy_err((dev)->hw->wiphy, "%s: Error - " fmt, \
+ wiphy_err_ratelimited((dev)->hw->wiphy, "%s: Error - " fmt, \
__func__, ##__VA_ARGS__)
#define rt2x00_warn(dev, fmt, ...) \
- wiphy_warn((dev)->hw->wiphy, "%s: Warning - " fmt, \
+ wiphy_warn_ratelimited((dev)->hw->wiphy, "%s: Warning - " fmt, \
__func__, ##__VA_ARGS__)
#define rt2x00_info(dev, fmt, ...) \
wiphy_info((dev)->hw->wiphy, "%s: Info - " fmt, \
@@ -980,8 +980,6 @@ struct rt2x00_dev {
*/
DECLARE_KFIFO_PTR(txstatus_fifo, u32);
- unsigned long last_nostatus_check;
-
/*
* Timer to ensure tx status reports are read (rt2800usb).
*/
@@ -1016,6 +1014,7 @@ struct rt2x00_dev {
unsigned int extra_tx_headroom;
struct usb_anchor *anchor;
+ unsigned int num_proto_errs;
/* Clock for System On Chip devices. */
struct clk *clk;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 357c0941aaad..1b08b01db27b 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1007,7 +1007,7 @@ void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr
const char *mac_addr;
mac_addr = of_get_mac_address(rt2x00dev->dev->of_node);
- if (mac_addr)
+ if (!IS_ERR(mac_addr))
ether_addr_copy(eeprom_mac_addr, mac_addr);
if (!is_valid_ether_addr(eeprom_mac_addr)) {
@@ -1391,6 +1391,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
mutex_init(&rt2x00dev->conf_mutex);
INIT_LIST_HEAD(&rt2x00dev->bar_list);
spin_lock_init(&rt2x00dev->bar_list_lock);
+ hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
@@ -1515,6 +1517,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
+ hrtimer_cancel(&rt2x00dev->txstatus_timer);
+
/*
* Kill the tx status tasklet.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
index 184a4148b2f8..03e6cdb0b5a4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mmio.h
@@ -80,8 +80,6 @@ int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev,
*
* @desc: Pointer to device descriptor
* @desc_dma: DMA pointer to &desc.
- * @data: Pointer to device's entry memory.
- * @data_dma: DMA pointer to &data.
*/
struct queue_entry_priv_mmio {
__le32 *desc;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 4834b4eb0206..03b206440208 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -674,7 +674,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
spin_lock(&queue->tx_lock);
if (unlikely(rt2x00queue_full(queue))) {
- rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
+ rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n",
queue->qid);
ret = -ENOBUFS;
goto out;
@@ -1042,7 +1042,6 @@ void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
*/
tx_queue_for_each(rt2x00dev, queue)
rt2x00queue_start_queue(queue);
- rt2x00dev->last_nostatus_check = jiffies;
rt2x00queue_start_queue(rt2x00dev->rx);
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
index a15bae29917b..20113f861b9e 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.h
@@ -361,7 +361,6 @@ enum queue_entry_flags {
ENTRY_DATA_PENDING,
ENTRY_DATA_IO_FAILED,
ENTRY_DATA_STATUS_PENDING,
- ENTRY_DATA_STATUS_SET,
};
/**
@@ -387,8 +386,6 @@ struct queue_entry {
unsigned int entry_idx;
- u32 status;
-
void *priv_data;
};
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 086aad22743d..9cdd7f2c92b5 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -31,6 +31,22 @@
#include "rt2x00.h"
#include "rt2x00usb.h"
+static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status)
+{
+ if (status == -ENODEV || status == -ENOENT)
+ return true;
+
+ if (status == -EPROTO || status == -ETIMEDOUT)
+ rt2x00dev->num_proto_errs++;
+ else
+ rt2x00dev->num_proto_errs = 0;
+
+ if (rt2x00dev->num_proto_errs > 3)
+ return true;
+
+ return false;
+}
+
/*
* Interfacing with the HW.
*/
@@ -57,7 +73,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
if (status >= 0)
return 0;
- if (status == -ENODEV || status == -ENOENT) {
+ if (rt2x00usb_check_usb_error(rt2x00dev, status)) {
/* Device has disappeared. */
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
break;
@@ -321,7 +337,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- if (status == -ENODEV || status == -ENOENT)
+ if (rt2x00usb_check_usb_error(rt2x00dev, status))
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
@@ -410,7 +426,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
- if (status == -ENODEV || status == -ENOENT)
+ if (rt2x00usb_check_usb_error(rt2x00dev, status))
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
rt2x00lib_dmadone(entry);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 44a943d18b84..ee4d8106bba5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2795,6 +2795,8 @@ static int __init init_ray_cs(void)
rc = pcmcia_register_driver(&ray_driver);
pr_debug("raylink init_module register_pcmcia_driver returns 0x%x\n",
rc);
+ if (rc)
+ return rc;
#ifdef CONFIG_PROC_FS
proc_mkdir("driver/ray_cs", NULL);
@@ -2818,11 +2820,7 @@ static void __exit exit_ray_cs(void)
pr_debug("ray_cs: cleanup_module\n");
#ifdef CONFIG_PROC_FS
- remove_proc_entry("driver/ray_cs/ray_cs", NULL);
- remove_proc_entry("driver/ray_cs/essid", NULL);
- remove_proc_entry("driver/ray_cs/net_type", NULL);
- remove_proc_entry("driver/ray_cs/translate", NULL);
- remove_proc_entry("driver/ray_cs", NULL);
+ remove_proc_subtree("driver/ray_cs", NULL);
#endif
pcmcia_unregister_driver(&ray_driver);
diff --git a/drivers/net/wireless/realtek/Kconfig b/drivers/net/wireless/realtek/Kconfig
index 3db988e689d7..9189fd672578 100644
--- a/drivers/net/wireless/realtek/Kconfig
+++ b/drivers/net/wireless/realtek/Kconfig
@@ -14,5 +14,6 @@ if WLAN_VENDOR_REALTEK
source "drivers/net/wireless/realtek/rtl818x/Kconfig"
source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
+source "drivers/net/wireless/realtek/rtw88/Kconfig"
endif # WLAN_VENDOR_REALTEK
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
index 9c78deb5eea9..118af9963d61 100644
--- a/drivers/net/wireless/realtek/Makefile
+++ b/drivers/net/wireless/realtek/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_RTL8180) += rtl818x/
obj-$(CONFIG_RTL8187) += rtl818x/
obj-$(CONFIG_RTLWIFI) += rtlwifi/
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/
+obj-$(CONFIG_RTW88) += rtw88/
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 217d2a7a43c7..ac746c322554 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -448,6 +448,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
/* <2> work queue */
rtlpriv->works.hw = hw;
rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (unlikely(!rtlpriv->works.rtl_wq)) {
+ pr_err("Failed to allocate work queue\n");
+ return;
+ }
+
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
(void *)rtl_watchdog_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 48ca52102cef..4055e0ab75ba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -499,16 +499,16 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
- spin_lock_bh(&rtlpriv->locks.waitq_lock);
+ spin_lock(&rtlpriv->locks.waitq_lock);
if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
(ring->entries - skb_queue_len(&ring->queue) >
rtlhal->max_earlymode_num)) {
skb = skb_dequeue(&mac->skb_waitq[tid]);
} else {
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ spin_unlock(&rtlpriv->locks.waitq_lock);
break;
}
- spin_unlock_bh(&rtlpriv->locks.waitq_lock);
+ spin_unlock(&rtlpriv->locks.waitq_lock);
/* Some macaddr can't do early mode. like
* multicast/broadcast/no_qos data
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index 203e7b574e84..e2e0bfbc24fe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -600,6 +600,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index 106011a24827..483dc8bdc555 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -372,8 +372,9 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_88e *p_drvinfo;
struct ieee80211_hdr *hdr;
-
+ u8 wake_match;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+
status->packet_report_type = (u8)GET_RX_STATUS_DESC_RPT_SEL(pdesc);
if (status->packet_report_type == TX_REPORT2)
status->length = (u16)GET_RX_RPT2_DESC_PKT_LEN(pdesc);
@@ -399,18 +400,18 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
status->is_cck = RTL8188_RX_HAL_IS_CCK_RATE(status->rate);
status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(2);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
- if (status->wake_match)
+ wake_match = 0;
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 18c76990a089..86b1b88cc4ed 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -623,6 +623,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
if (cmd_send_packet)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index f3a336e0ea98..d259794a308b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -42,12 +42,9 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u32 tx_agc[2] = { 0, 0 }, tmpval = 0;
- bool turbo_scanoff = false;
u8 idx1, idx2;
u8 *ptr;
- if ((rtlefuse->eeprom_regulatory != 0) || (rtlefuse->external_pa))
- turbo_scanoff = true;
if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 7c5b54b71a92..67305ce915ec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -744,6 +744,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 09cf8180e4ff..d297cfc0fd2b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -331,6 +331,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
struct rx_fwinfo *p_drvinfo;
struct ieee80211_hdr *hdr;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
+ u8 wake_match;
if (GET_RX_STATUS_DESC_RPT_SEL(pdesc) == 0)
status->packet_report_type = NORMAL_RX;
@@ -350,18 +351,18 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
status->macid = GET_RX_DESC_MACID(pdesc);
- if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(2);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
- if (status->wake_match)
+ wake_match = 0;
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index 514891ea2c64..d8260c7afe09 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -663,7 +663,7 @@ void rtl8723e_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
}
-void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
+static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index be451a6f7dbe..33481232fad0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -448,6 +448,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
u1rsvdpageloc, 3);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 6bab162e1bb8..655460f61bbc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -1675,6 +1675,7 @@ static void _rtl8723e_read_adapter_info(struct ieee80211_hw *hw,
rtlhal->oem_id = RT_CID_819X_LENOVO;
break;
}
+ break;
case 0x1025:
rtlhal->oem_id = RT_CID_819X_ACER;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index 4d7fa27f55ca..aa56058af56e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -562,6 +562,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
u1rsvdpageloc, sizeof(u1rsvdpageloc));
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index 9ada9a06c6ea..d87ba03fe78f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -300,7 +300,7 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_8723be *p_drvinfo;
struct ieee80211_hdr *hdr;
-
+ u8 wake_match;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
@@ -329,18 +329,18 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
status->packet_report_type = NORMAL_RX;
- if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(2);
+ if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
- if (status->wake_match)
+ wake_match = 0;
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index dc0eb692088f..fe32d397d287 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -1623,6 +1623,8 @@ out:
&reserved_page_packet_8812[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
@@ -1759,6 +1761,8 @@ out:
&reserved_page_packet_8821[0], totalpacketlen);
skb = dev_alloc_skb(totalpacketlen);
+ if (!skb)
+ return;
skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
rtstatus = rtl_cmd_send_packet(hw, skb);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index db5e628b17ed..7b6faf38e09c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -436,7 +436,7 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rx_fwinfo_8821ae *p_drvinfo;
struct ieee80211_hdr *hdr;
-
+ u8 wake_match;
u32 phystatus = GET_RX_DESC_PHYST(pdesc);
status->length = (u16)GET_RX_DESC_PKT_LEN(pdesc);
@@ -473,18 +473,18 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
status->packet_report_type = NORMAL_RX;
if (GET_RX_STATUS_DESC_PATTERN_MATCH(pdesc))
- status->wake_match = BIT(2);
+ wake_match = BIT(2);
else if (GET_RX_STATUS_DESC_MAGIC_MATCH(pdesc))
- status->wake_match = BIT(1);
+ wake_match = BIT(1);
else if (GET_RX_STATUS_DESC_UNICAST_MATCH(pdesc))
- status->wake_match = BIT(0);
+ wake_match = BIT(0);
else
- status->wake_match = 0;
+ wake_match = 0;
- if (status->wake_match)
+ if (wake_match)
RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
"GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- status->wake_match);
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index e32e9ffa3192..518aaa875361 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -2138,7 +2138,6 @@ struct rtl_stats {
u8 packet_report_type;
u32 macid;
- u8 wake_match;
u32 bt_rx_rssi_percentage;
u32 macid_valid_entry[2];
};
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
new file mode 100644
index 000000000000..55b1bf3dd9b6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -0,0 +1,54 @@
+menuconfig RTW88
+ tristate "Realtek 802.11ac wireless chips support"
+ depends on MAC80211
+ help
+ This option adds support for mac80211-based wireless drivers for
+ Realtek IEEE 802.11ac wireless chipsets.
+
+ If you choose to build it as a module, it will be called rtw88.
+
+if RTW88
+
+config RTW88_CORE
+ tristate
+
+config RTW88_PCI
+ tristate
+
+config RTW88_8822BE
+ bool "Realtek 8822BE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ help
+ Select this option to enable support for the 8822BE chipset, an
+ 802.11ac PCIe wireless network adapter.
+
+config RTW88_8822CE
+ bool "Realtek 8822CE PCI wireless network adapter"
+ depends on PCI
+ select RTW88_CORE
+ select RTW88_PCI
+ help
+ Select this option to enable support for the 8822CE chipset, an
+ 802.11ac PCIe wireless network adapter.
+
+config RTW88_DEBUG
+ bool "Realtek rtw88 debug support"
+ depends on RTW88_CORE
+ help
+ Enable debug support for the rtw88 driver.
+
+ If unsure, say Y to simplify debugging problems.
+
+config RTW88_DEBUGFS
+ bool "Realtek rtw88 debugfs support"
+ depends on RTW88_CORE
+ help
+ Enable debugfs support for the rtw88 driver.
+
+ If unsure, say Y to simplify debugging problems.
+
+endif
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
new file mode 100644
index 000000000000..e0bfefd154af
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+obj-$(CONFIG_RTW88_CORE) += rtw88.o
+rtw88-y += main.o \
+ mac80211.o \
+ util.o \
+ debug.o \
+ tx.o \
+ rx.o \
+ mac.o \
+ phy.o \
+ efuse.o \
+ fw.o \
+ ps.o \
+ sec.o \
+ regd.o
+
+rtw88-$(CONFIG_RTW88_8822BE) += rtw8822b.o rtw8822b_table.o
+rtw88-$(CONFIG_RTW88_8822CE) += rtw8822c.o rtw8822c_table.o
+
+obj-$(CONFIG_RTW88_PCI) += rtwpci.o
+rtwpci-objs := pci.o
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
new file mode 100644
index 000000000000..f0ae26018f97
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -0,0 +1,637 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include "main.h"
+#include "sec.h"
+#include "fw.h"
+#include "debug.h"
+
+#ifdef CONFIG_RTW88_DEBUGFS
+
+struct rtw_debugfs_priv {
+ struct rtw_dev *rtwdev;
+ int (*cb_read)(struct seq_file *m, void *v);
+ ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *loff);
+ union {
+ u32 cb_data;
+ u8 *buf;
+ struct {
+ u32 page_offset;
+ u32 page_num;
+ } rsvd_page;
+ struct {
+ u8 rf_path;
+ u32 rf_addr;
+ u32 rf_mask;
+ };
+ struct {
+ u32 addr;
+ u32 len;
+ } read_reg;
+ };
+};
+
+static int rtw_debugfs_single_show(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+
+ return debugfs_priv->cb_read(m, v);
+}
+
+static ssize_t rtw_debugfs_common_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static ssize_t rtw_debugfs_single_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static int rtw_debugfs_single_open_rw(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, rtw_debugfs_single_show, inode->i_private);
+}
+
+static int rtw_debugfs_close(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations file_ops_single_r = {
+ .owner = THIS_MODULE,
+ .open = rtw_debugfs_single_open_rw,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct file_operations file_ops_single_rw = {
+ .owner = THIS_MODULE,
+ .open = rtw_debugfs_single_open_rw,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = rtw_debugfs_single_write,
+};
+
+static const struct file_operations file_ops_common_write = {
+ .owner = THIS_MODULE,
+ .write = rtw_debugfs_common_write,
+ .open = simple_open,
+ .release = rtw_debugfs_close,
+};
+
+static int rtw_debugfs_get_read_reg(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val, len, addr;
+
+ len = debugfs_priv->read_reg.len;
+ addr = debugfs_priv->read_reg.addr;
+ switch (len) {
+ case 1:
+ val = rtw_read8(rtwdev, addr);
+ seq_printf(m, "reg 0x%03x: 0x%02x\n", addr, val);
+ break;
+ case 2:
+ val = rtw_read16(rtwdev, addr);
+ seq_printf(m, "reg 0x%03x: 0x%04x\n", addr, val);
+ break;
+ case 4:
+ val = rtw_read32(rtwdev, addr);
+ seq_printf(m, "reg 0x%03x: 0x%08x\n", addr, val);
+ break;
+ }
+ return 0;
+}
+
+static int rtw_debugfs_get_rf_read(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val, addr, mask;
+ u8 path;
+
+ path = debugfs_priv->rf_path;
+ addr = debugfs_priv->rf_addr;
+ mask = debugfs_priv->rf_mask;
+
+ val = rtw_read_rf(rtwdev, path, addr, mask);
+
+ seq_printf(m, "rf_read path:%d addr:0x%08x mask:0x%08x val=0x%08x\n",
+ path, addr, mask, val);
+
+ return 0;
+}
+
+static int rtw_debugfs_copy_from_user(char tmp[], int size,
+ const char __user *buffer, size_t count,
+ int num)
+{
+ int tmp_len;
+
+ if (count < num)
+ return -EFAULT;
+
+ tmp_len = (count > size - 1 ? size - 1 : count);
+
+ if (!buffer || copy_from_user(tmp, buffer, tmp_len))
+ return count;
+
+ tmp[tmp_len] = '\0';
+
+ return 0;
+}
+
+static ssize_t rtw_debugfs_set_read_reg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 addr, len;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+
+ num = sscanf(tmp, "%x %x", &addr, &len);
+
+ if (num != 2)
+ return count;
+
+ if (len != 1 && len != 2 && len != 4) {
+ rtw_warn(rtwdev, "read reg setting wrong len\n");
+ return -EINVAL;
+ }
+ debugfs_priv->read_reg.addr = addr;
+ debugfs_priv->read_reg.len = len;
+
+ return count;
+}
+
+static int rtw_debugfs_get_dump_cam(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val, command;
+ u32 hw_key_idx = debugfs_priv->cb_data << RTW_SEC_CAM_ENTRY_SHIFT;
+ u32 read_cmd = RTW_SEC_CMD_POLLING;
+ int i;
+
+ seq_printf(m, "cam entry%d\n", debugfs_priv->cb_data);
+ seq_puts(m, "0x0 0x1 0x2 0x3 ");
+ seq_puts(m, "0x4 0x5\n");
+ mutex_lock(&rtwdev->mutex);
+ for (i = 0; i <= 5; i++) {
+ command = read_cmd | (hw_key_idx + i);
+ rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
+ val = rtw_read32(rtwdev, RTW_SEC_READ_REG);
+ seq_printf(m, "%8.8x", val);
+ if (i < 2)
+ seq_puts(m, " ");
+ }
+ seq_puts(m, "\n");
+ mutex_unlock(&rtwdev->mutex);
+ return 0;
+}
+
+static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u8 page_size = rtwdev->chip->page_size;
+ u32 buf_size = debugfs_priv->rsvd_page.page_num * page_size;
+ u32 offset = debugfs_priv->rsvd_page.page_offset * page_size;
+ u8 *buf;
+ int i;
+ int ret;
+
+ buf = vzalloc(buf_size);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rtw_dump_drv_rsvd_page(rtwdev, offset, buf_size, (u32 *)buf);
+ if (ret) {
+ rtw_err(rtwdev, "failed to dump rsvd page\n");
+ vfree(buf);
+ return ret;
+ }
+
+ for (i = 0 ; i < buf_size ; i += 8) {
+ if (i % page_size == 0)
+ seq_printf(m, "PAGE %d\n", (i + offset) / page_size);
+ seq_printf(m, "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
+ *(buf + i), *(buf + i + 1),
+ *(buf + i + 2), *(buf + i + 3),
+ *(buf + i + 4), *(buf + i + 5),
+ *(buf + i + 6), *(buf + i + 7));
+ }
+ vfree(buf);
+
+ return 0;
+}
+
+static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 offset, page_num;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 2);
+
+ num = sscanf(tmp, "%d %d", &offset, &page_num);
+
+ if (num != 2) {
+ rtw_warn(rtwdev, "invalid arguments\n");
+ return num;
+ }
+
+ debugfs_priv->rsvd_page.page_offset = offset;
+ debugfs_priv->rsvd_page.page_num = page_num;
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_single_input(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 input;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 1);
+
+ num = kstrtou32(tmp, 0, &input);
+
+ if (num) {
+ rtw_warn(rtwdev, "kstrtou32 failed\n");
+ return num;
+ }
+
+ debugfs_priv->cb_data = input;
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_write_reg(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 addr, val, len;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+
+ /* write BB/MAC register */
+ num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3)
+ return count;
+
+ switch (len) {
+ case 1:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "reg write8 0x%03x: 0x%08x\n", addr, val);
+ rtw_write8(rtwdev, addr, (u8)val);
+ break;
+ case 2:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "reg write16 0x%03x: 0x%08x\n", addr, val);
+ rtw_write16(rtwdev, addr, (u16)val);
+ break;
+ case 4:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "reg write32 0x%03x: 0x%08x\n", addr, val);
+ rtw_write32(rtwdev, addr, (u32)val);
+ break;
+ default:
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "error write length = %d\n", len);
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_rf_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 path, addr, mask, val;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 4);
+
+ num = sscanf(tmp, "%x %x %x %x", &path, &addr, &mask, &val);
+
+ if (num != 4) {
+ rtw_warn(rtwdev, "invalid args, [path] [addr] [mask] [val]\n");
+ return count;
+ }
+
+ rtw_write_rf(rtwdev, path, addr, mask, val);
+ rtw_dbg(rtwdev, RTW_DBG_DEBUGFS,
+ "write_rf path:%d addr:0x%08x mask:0x%08x, val:0x%08x\n",
+ path, addr, mask, val);
+
+ return count;
+}
+
+static ssize_t rtw_debugfs_set_rf_read(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ char tmp[32 + 1];
+ u32 path, addr, mask;
+ int num;
+
+ rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3);
+
+ num = sscanf(tmp, "%x %x %x", &path, &addr, &mask);
+
+ if (num != 3) {
+ rtw_warn(rtwdev, "invalid args, [path] [addr] [mask]\n");
+ return count;
+ }
+
+ debugfs_priv->rf_path = path;
+ debugfs_priv->rf_addr = addr;
+ debugfs_priv->rf_mask = mask;
+
+ return count;
+}
+
+static int rtw_debug_get_mac_page(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtw_read32(rtwdev, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 val;
+ u32 page = debugfs_priv->cb_data;
+ int i, n;
+ int max = 0xff;
+
+ val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ for (n = 0; n <= max; ) {
+ seq_printf(m, "\n%8.8x ", n + page);
+ for (i = 0; i < 4 && n <= max; i++, n += 4)
+ seq_printf(m, "%8.8x ",
+ rtw_read32(rtwdev, (page | n)));
+ }
+ seq_puts(m, "\n");
+ return 0;
+}
+
+static int rtw_debug_get_rf_dump(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 addr, offset, data;
+ u8 path;
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ seq_printf(m, "RF path:%d\n", path);
+ for (addr = 0; addr < 0x100; addr += 4) {
+ seq_printf(m, "%8.8x ", addr);
+ for (offset = 0; offset < 4; offset++) {
+ data = rtw_read_rf(rtwdev, path, addr + offset,
+ 0xffffffff);
+ seq_printf(m, "%8.8x ", data);
+ }
+ seq_puts(m, "\n");
+ }
+ seq_puts(m, "\n");
+ }
+
+ return 0;
+}
+
+#define rtw_debug_impl_mac(page, addr) \
+static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \
+ .cb_read = rtw_debug_get_mac_page, \
+ .cb_data = addr, \
+}
+
+rtw_debug_impl_mac(0, 0x0000);
+rtw_debug_impl_mac(1, 0x0100);
+rtw_debug_impl_mac(2, 0x0200);
+rtw_debug_impl_mac(3, 0x0300);
+rtw_debug_impl_mac(4, 0x0400);
+rtw_debug_impl_mac(5, 0x0500);
+rtw_debug_impl_mac(6, 0x0600);
+rtw_debug_impl_mac(7, 0x0700);
+rtw_debug_impl_mac(10, 0x1000);
+rtw_debug_impl_mac(11, 0x1100);
+rtw_debug_impl_mac(12, 0x1200);
+rtw_debug_impl_mac(13, 0x1300);
+rtw_debug_impl_mac(14, 0x1400);
+rtw_debug_impl_mac(15, 0x1500);
+rtw_debug_impl_mac(16, 0x1600);
+rtw_debug_impl_mac(17, 0x1700);
+
+#define rtw_debug_impl_bb(page, addr) \
+static struct rtw_debugfs_priv rtw_debug_priv_bb_ ##page = { \
+ .cb_read = rtw_debug_get_bb_page, \
+ .cb_data = addr, \
+}
+
+rtw_debug_impl_bb(8, 0x0800);
+rtw_debug_impl_bb(9, 0x0900);
+rtw_debug_impl_bb(a, 0x0a00);
+rtw_debug_impl_bb(b, 0x0b00);
+rtw_debug_impl_bb(c, 0x0c00);
+rtw_debug_impl_bb(d, 0x0d00);
+rtw_debug_impl_bb(e, 0x0e00);
+rtw_debug_impl_bb(f, 0x0f00);
+rtw_debug_impl_bb(18, 0x1800);
+rtw_debug_impl_bb(19, 0x1900);
+rtw_debug_impl_bb(1a, 0x1a00);
+rtw_debug_impl_bb(1b, 0x1b00);
+rtw_debug_impl_bb(1c, 0x1c00);
+rtw_debug_impl_bb(1d, 0x1d00);
+rtw_debug_impl_bb(1e, 0x1e00);
+rtw_debug_impl_bb(1f, 0x1f00);
+rtw_debug_impl_bb(2c, 0x2c00);
+rtw_debug_impl_bb(2d, 0x2d00);
+rtw_debug_impl_bb(40, 0x4000);
+rtw_debug_impl_bb(41, 0x4100);
+
+static struct rtw_debugfs_priv rtw_debug_priv_rf_dump = {
+ .cb_read = rtw_debug_get_rf_dump,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_write_reg = {
+ .cb_write = rtw_debugfs_set_write_reg,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_rf_write = {
+ .cb_write = rtw_debugfs_set_rf_write,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_rf_read = {
+ .cb_write = rtw_debugfs_set_rf_read,
+ .cb_read = rtw_debugfs_get_rf_read,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_read_reg = {
+ .cb_write = rtw_debugfs_set_read_reg,
+ .cb_read = rtw_debugfs_get_read_reg,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_dump_cam = {
+ .cb_write = rtw_debugfs_set_single_input,
+ .cb_read = rtw_debugfs_get_dump_cam,
+};
+
+static struct rtw_debugfs_priv rtw_debug_priv_rsvd_page = {
+ .cb_write = rtw_debugfs_set_rsvd_page,
+ .cb_read = rtw_debugfs_get_rsvd_page,
+};
+
+#define rtw_debugfs_add_core(name, mode, fopname, parent) \
+ do { \
+ rtw_debug_priv_ ##name.rtwdev = rtwdev; \
+ if (!debugfs_create_file(#name, mode, \
+ parent, &rtw_debug_priv_ ##name,\
+ &file_ops_ ##fopname)) \
+ pr_debug("Unable to initialize debugfs:%s\n", \
+ #name); \
+ } while (0)
+
+#define rtw_debugfs_add_w(name) \
+ rtw_debugfs_add_core(name, S_IFREG | 0222, common_write, debugfs_topdir)
+#define rtw_debugfs_add_rw(name) \
+ rtw_debugfs_add_core(name, S_IFREG | 0666, single_rw, debugfs_topdir)
+#define rtw_debugfs_add_r(name) \
+ rtw_debugfs_add_core(name, S_IFREG | 0444, single_r, debugfs_topdir)
+
+void rtw_debugfs_init(struct rtw_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir = rtwdev->debugfs;
+
+ debugfs_topdir = debugfs_create_dir("rtw88",
+ rtwdev->hw->wiphy->debugfsdir);
+ rtw_debugfs_add_w(write_reg);
+ rtw_debugfs_add_rw(read_reg);
+ rtw_debugfs_add_w(rf_write);
+ rtw_debugfs_add_rw(rf_read);
+ rtw_debugfs_add_rw(dump_cam);
+ rtw_debugfs_add_rw(rsvd_page);
+ rtw_debugfs_add_r(mac_0);
+ rtw_debugfs_add_r(mac_1);
+ rtw_debugfs_add_r(mac_2);
+ rtw_debugfs_add_r(mac_3);
+ rtw_debugfs_add_r(mac_4);
+ rtw_debugfs_add_r(mac_5);
+ rtw_debugfs_add_r(mac_6);
+ rtw_debugfs_add_r(mac_7);
+ rtw_debugfs_add_r(bb_8);
+ rtw_debugfs_add_r(bb_9);
+ rtw_debugfs_add_r(bb_a);
+ rtw_debugfs_add_r(bb_b);
+ rtw_debugfs_add_r(bb_c);
+ rtw_debugfs_add_r(bb_d);
+ rtw_debugfs_add_r(bb_e);
+ rtw_debugfs_add_r(bb_f);
+ rtw_debugfs_add_r(mac_10);
+ rtw_debugfs_add_r(mac_11);
+ rtw_debugfs_add_r(mac_12);
+ rtw_debugfs_add_r(mac_13);
+ rtw_debugfs_add_r(mac_14);
+ rtw_debugfs_add_r(mac_15);
+ rtw_debugfs_add_r(mac_16);
+ rtw_debugfs_add_r(mac_17);
+ rtw_debugfs_add_r(bb_18);
+ rtw_debugfs_add_r(bb_19);
+ rtw_debugfs_add_r(bb_1a);
+ rtw_debugfs_add_r(bb_1b);
+ rtw_debugfs_add_r(bb_1c);
+ rtw_debugfs_add_r(bb_1d);
+ rtw_debugfs_add_r(bb_1e);
+ rtw_debugfs_add_r(bb_1f);
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
+ rtw_debugfs_add_r(bb_2c);
+ rtw_debugfs_add_r(bb_2d);
+ rtw_debugfs_add_r(bb_40);
+ rtw_debugfs_add_r(bb_41);
+ }
+ rtw_debugfs_add_r(rf_dump);
+}
+
+#endif /* CONFIG_RTW88_DEBUGFS */
+
+#ifdef CONFIG_RTW88_DEBUG
+
+void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (rtw_debug_mask & mask)
+ dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__rtw_dbg);
+
+#endif /* CONFIG_RTW88_DEBUG */
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
new file mode 100644
index 000000000000..45851cbbd2ab
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_DEBUG_H
+#define __RTW_DEBUG_H
+
+enum rtw_debug_mask {
+ RTW_DBG_PCI = 0x00000001,
+ RTW_DBG_TX = 0x00000002,
+ RTW_DBG_RX = 0x00000004,
+ RTW_DBG_PHY = 0x00000008,
+ RTW_DBG_FW = 0x00000010,
+ RTW_DBG_EFUSE = 0x00000020,
+ RTW_DBG_COEX = 0x00000040,
+ RTW_DBG_RFK = 0x00000080,
+ RTW_DBG_REGD = 0x00000100,
+ RTW_DBG_DEBUGFS = 0x00000200,
+
+ RTW_DBG_ALL = 0xffffffff
+};
+
+#ifdef CONFIG_RTW88_DEBUGFS
+
+void rtw_debugfs_init(struct rtw_dev *rtwdev);
+
+#else
+
+static inline void rtw_debugfs_init(struct rtw_dev *rtwdev) {}
+
+#endif /* CONFIG_RTW88_DEBUGFS */
+
+#ifdef CONFIG_RTW88_DEBUG
+
+__printf(3, 4)
+void __rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...);
+
+#define rtw_dbg(rtwdev, a...) __rtw_dbg(rtwdev, ##a)
+
+#else
+
+static inline void rtw_dbg(struct rtw_dev *rtwdev, enum rtw_debug_mask mask,
+ const char *fmt, ...) {}
+
+#endif /* CONFIG_RTW88_DEBUG */
+
+#define rtw_info(rtwdev, a...) dev_info(rtwdev->dev, ##a)
+#define rtw_warn(rtwdev, a...) dev_warn(rtwdev->dev, ##a)
+#define rtw_err(rtwdev, a...) dev_err(rtwdev->dev, ##a)
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.c b/drivers/net/wireless/realtek/rtw88/efuse.c
new file mode 100644
index 000000000000..212c8376a8c9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/efuse.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "efuse.h"
+#include "reg.h"
+#include "debug.h"
+
+#define RTW_EFUSE_BANK_WIFI 0x0
+
+static void switch_efuse_bank(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, REG_LDO_EFUSE_CTRL, BIT_MASK_EFUSE_BANK_SEL,
+ RTW_EFUSE_BANK_WIFI);
+}
+
+#define invalid_efuse_header(hdr1, hdr2) \
+ ((hdr1) == 0xff || (((hdr1) & 0x1f) == 0xf && (hdr2) == 0xff))
+#define invalid_efuse_content(word_en, i) \
+ (((word_en) & BIT(i)) != 0x0)
+#define get_efuse_blk_idx_2_byte(hdr1, hdr2) \
+ ((((hdr2) & 0xf0) >> 1) | (((hdr1) >> 5) & 0x07))
+#define get_efuse_blk_idx_1_byte(hdr1) \
+ (((hdr1) & 0xf0) >> 4)
+#define block_idx_to_logical_idx(blk_idx, i) \
+ (((blk_idx) << 3) + ((i) << 1))
+
+/* efuse header format
+ *
+ * | 7 5 4 0 | 7 4 3 0 | 15 8 7 0 |
+ * block[2:0] 0 1111 block[6:3] word_en[3:0] byte0 byte1
+ * | header 1 (optional) | header 2 | word N |
+ *
+ * word_en: 4 bits each word. 0 -> write; 1 -> not write
+ * N: 1~4, depends on word_en
+ */
+static int rtw_dump_logical_efuse_map(struct rtw_dev *rtwdev, u8 *phy_map,
+ u8 *log_map)
+{
+ u32 physical_size = rtwdev->efuse.physical_size;
+ u32 protect_size = rtwdev->efuse.protect_size;
+ u32 logical_size = rtwdev->efuse.logical_size;
+ u32 phy_idx, log_idx;
+ u8 hdr1, hdr2;
+ u8 blk_idx;
+ u8 word_en;
+ int i;
+
+ for (phy_idx = 0; phy_idx < physical_size - protect_size;) {
+ hdr1 = phy_map[phy_idx];
+ hdr2 = phy_map[phy_idx + 1];
+ if (invalid_efuse_header(hdr1, hdr2))
+ break;
+
+ if ((hdr1 & 0x1f) == 0xf) {
+ /* 2-byte header format */
+ blk_idx = get_efuse_blk_idx_2_byte(hdr1, hdr2);
+ word_en = hdr2 & 0xf;
+ phy_idx += 2;
+ } else {
+ /* 1-byte header format */
+ blk_idx = get_efuse_blk_idx_1_byte(hdr1);
+ word_en = hdr1 & 0xf;
+ phy_idx += 1;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (invalid_efuse_content(word_en, i))
+ continue;
+
+ log_idx = block_idx_to_logical_idx(blk_idx, i);
+ if (phy_idx + 1 > physical_size - protect_size ||
+ log_idx + 1 > logical_size)
+ return -EINVAL;
+
+ log_map[log_idx] = phy_map[phy_idx];
+ log_map[log_idx + 1] = phy_map[phy_idx + 1];
+ phy_idx += 2;
+ }
+ }
+ return 0;
+}
+
+static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 size = rtwdev->efuse.physical_size;
+ u32 efuse_ctl;
+ u32 addr;
+ u32 cnt;
+
+ switch_efuse_bank(rtwdev);
+
+ /* disable 2.5V LDO */
+ chip->ops->cfg_ldo25(rtwdev, false);
+
+ efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL);
+
+ for (addr = 0; addr < size; addr++) {
+ efuse_ctl &= ~(BIT_MASK_EF_DATA | BITS_EF_ADDR);
+ efuse_ctl |= (addr & BIT_MASK_EF_ADDR) << BIT_SHIFT_EF_ADDR;
+ rtw_write32(rtwdev, REG_EFUSE_CTRL, efuse_ctl & (~BIT_EF_FLAG));
+
+ cnt = 1000000;
+ do {
+ udelay(1);
+ efuse_ctl = rtw_read32(rtwdev, REG_EFUSE_CTRL);
+ if (--cnt == 0)
+ return -EBUSY;
+ } while (!(efuse_ctl & BIT_EF_FLAG));
+
+ *(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA);
+ }
+
+ return 0;
+}
+
+int rtw_parse_efuse_map(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u32 phy_size = efuse->physical_size;
+ u32 log_size = efuse->logical_size;
+ u8 *phy_map = NULL;
+ u8 *log_map = NULL;
+ int ret = 0;
+
+ phy_map = kmalloc(phy_size, GFP_KERNEL);
+ log_map = kmalloc(log_size, GFP_KERNEL);
+ if (!phy_map || !log_map) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = rtw_dump_physical_efuse_map(rtwdev, phy_map);
+ if (ret) {
+ rtw_err(rtwdev, "failed to dump efuse physical map\n");
+ goto out_free;
+ }
+
+ memset(log_map, 0xff, log_size);
+ ret = rtw_dump_logical_efuse_map(rtwdev, phy_map, log_map);
+ if (ret) {
+ rtw_err(rtwdev, "failed to dump efuse logical map\n");
+ goto out_free;
+ }
+
+ ret = chip->ops->read_efuse(rtwdev, log_map);
+ if (ret) {
+ rtw_err(rtwdev, "failed to read efuse map\n");
+ goto out_free;
+ }
+
+out_free:
+ kfree(log_map);
+ kfree(phy_map);
+
+ return ret;
+}
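
A minimal, self-contained user-space sketch of the logical-eFuse header decode
described in the comment block in efuse.c above. It applies the same bit
manipulations as the get_efuse_blk_idx_* and block_idx_to_logical_idx macros;
the sample header bytes are made up for illustration and are not taken from
real eFuse content.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t hdr1 = 0x2f, hdr2 = 0x5e;	/* hypothetical header bytes */
	uint8_t blk_idx, word_en;
	int i;

	if ((hdr1 & 0x1f) == 0x0f) {
		/* 2-byte format: block[2:0] in hdr1[7:5], block[6:3] in hdr2[7:4] */
		blk_idx = ((hdr2 & 0xf0) >> 1) | ((hdr1 >> 5) & 0x07);
		word_en = hdr2 & 0x0f;
	} else {
		/* 1-byte format: block[3:0] in hdr1[7:4] */
		blk_idx = (hdr1 & 0xf0) >> 4;
		word_en = hdr1 & 0x0f;
	}

	/* a cleared word_en bit means the corresponding 2-byte word is present */
	for (i = 0; i < 4; i++)
		if (!(word_en & (1 << i)))
			printf("word %d maps to logical offset 0x%x\n",
			       i, (blk_idx << 3) + (i << 1));
	return 0;
}
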
diff --git a/drivers/net/wireless/realtek/rtw88/efuse.h b/drivers/net/wireless/realtek/rtw88/efuse.h
new file mode 100644
index 000000000000..115bbe85946a
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/efuse.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_EFUSE_H__
+#define __RTW_EFUSE_H__
+
+#define EFUSE_HW_CAP_IGNORE 0
+#define EFUSE_HW_CAP_PTCL_VHT 3
+#define EFUSE_HW_CAP_SUPP_BW80 7
+#define EFUSE_HW_CAP_SUPP_BW40 6
+
+#define GET_EFUSE_HW_CAP_HCI(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(3, 0))
+#define GET_EFUSE_HW_CAP_BW(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(18, 16))
+#define GET_EFUSE_HW_CAP_NSS(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(20, 19))
+#define GET_EFUSE_HW_CAP_ANT_NUM(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(23, 21))
+#define GET_EFUSE_HW_CAP_PTCL(hw_cap) \
+ le32_get_bits(*((__le32 *)(hw_cap) + 0x01), GENMASK(27, 26))
+
+int rtw_parse_efuse_map(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
new file mode 100644
index 000000000000..cf4265cda224
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "tx.h"
+#include "reg.h"
+#include "debug.h"
+
+void rtw_fw_c2h_cmd_handle_ext(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_c2h_cmd *c2h;
+ u8 sub_cmd_id;
+
+ c2h = get_c2h_from_skb(skb);
+ sub_cmd_id = c2h->payload[0];
+
+ switch (sub_cmd_id) {
+ case C2H_CCX_RPT:
+ rtw_tx_report_handle(rtwdev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_c2h_cmd *c2h;
+ u32 pkt_offset;
+ u8 len;
+
+ pkt_offset = *((u32 *)skb->cb);
+ c2h = (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
+ len = skb->len - pkt_offset - 2;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "recv C2H, id=0x%02x, seq=0x%02x, len=%d\n",
+ c2h->id, c2h->seq, len);
+
+ switch (c2h->id) {
+ case C2H_HALMAC:
+ rtw_fw_c2h_cmd_handle_ext(rtwdev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev, u8 *h2c)
+{
+ u8 box;
+ u8 box_state;
+ u32 box_reg, box_ex_reg;
+ u32 h2c_wait;
+ int idx;
+
+ rtw_dbg(rtwdev, RTW_DBG_FW,
+ "send H2C content %02x%02x%02x%02x %02x%02x%02x%02x\n",
+ h2c[3], h2c[2], h2c[1], h2c[0],
+ h2c[7], h2c[6], h2c[5], h2c[4]);
+
+ spin_lock(&rtwdev->h2c.lock);
+
+ box = rtwdev->h2c.last_box_num;
+ switch (box) {
+ case 0:
+ box_reg = REG_HMEBOX0;
+ box_ex_reg = REG_HMEBOX0_EX;
+ break;
+ case 1:
+ box_reg = REG_HMEBOX1;
+ box_ex_reg = REG_HMEBOX1_EX;
+ break;
+ case 2:
+ box_reg = REG_HMEBOX2;
+ box_ex_reg = REG_HMEBOX2_EX;
+ break;
+ case 3:
+ box_reg = REG_HMEBOX3;
+ box_ex_reg = REG_HMEBOX3_EX;
+ break;
+ default:
+ WARN(1, "invalid h2c mail box number\n");
+ goto out;
+ }
+
+ h2c_wait = 20;
+ do {
+ box_state = rtw_read8(rtwdev, REG_HMETFR);
+ } while ((box_state >> box) & 0x1 && --h2c_wait > 0);
+
+ if (!h2c_wait) {
+ rtw_err(rtwdev, "failed to send h2c command\n");
+ goto out;
+ }
+
+ for (idx = 0; idx < 4; idx++)
+ rtw_write8(rtwdev, box_reg + idx, h2c[idx]);
+ for (idx = 0; idx < 4; idx++)
+ rtw_write8(rtwdev, box_ex_reg + idx, h2c[idx + 4]);
+
+ if (++rtwdev->h2c.last_box_num >= 4)
+ rtwdev->h2c.last_box_num = 0;
+
+out:
+ spin_unlock(&rtwdev->h2c.lock);
+}
+
+static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt)
+{
+ int ret;
+
+ spin_lock(&rtwdev->h2c.lock);
+
+ FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, rtwdev->h2c.seq);
+ ret = rtw_hci_write_data_h2c(rtwdev, h2c_pkt, H2C_PKT_SIZE);
+ if (ret)
+ rtw_err(rtwdev, "failed to send h2c packet\n");
+ rtwdev->h2c.seq++;
+
+ spin_unlock(&rtwdev->h2c.lock);
+}
+
+void
+rtw_fw_send_general_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + 4;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_GENERAL_INFO);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+
+ GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt,
+ fifo->rsvd_fw_txbuf_addr -
+ fifo->rsvd_boundary);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void
+rtw_fw_send_phydm_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + 8;
+ u8 fw_rf_type = 0;
+
+ if (hal->rf_type == RF_1T1R)
+ fw_rf_type = FW_RF_1T1R;
+ else if (hal->rf_type == RF_2T2R)
+ fw_rf_type = FW_RF_2T2R;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_PHYDM_INFO);
+
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ PHYDM_INFO_SET_REF_TYPE(h2c_pkt, efuse->rfe_option);
+ PHYDM_INFO_SET_RF_TYPE(h2c_pkt, fw_rf_type);
+ PHYDM_INFO_SET_CUT_VER(h2c_pkt, hal->cut_version);
+ PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, hal->antenna_tx);
+ PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, hal->antenna_rx);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u16 total_size = H2C_PKT_HDR_SIZE + 1;
+
+ rtw_h2c_pkt_set_header(h2c_pkt, H2C_PKT_IQK);
+ SET_PKT_H2C_TOTAL_LEN(h2c_pkt, total_size);
+ IQK_SET_CLEAR(h2c_pkt, para->clear);
+ IQK_SET_SEGMENT_IQK(h2c_pkt, para->segment_iqk);
+
+ rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 rssi = ewma_rssi_read(&si->avg_rssi);
+ bool stbc_en = si->stbc_en ? true : false;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSSI_MONITOR);
+
+ SET_RSSI_INFO_MACID(h2c_pkt, si->mac_id);
+ SET_RSSI_INFO_RSSI(h2c_pkt, rssi);
+ SET_RSSI_INFO_STBC(h2c_pkt, stbc_en);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ bool no_update = si->updated;
+ bool disable_pt = true;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RA_INFO);
+
+ SET_RA_INFO_MACID(h2c_pkt, si->mac_id);
+ SET_RA_INFO_RATE_ID(h2c_pkt, si->rate_id);
+ SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv);
+ SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable);
+ SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode);
+ SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en);
+ SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update);
+ SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable);
+ SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt);
+ SET_RA_INFO_RA_MASK0(h2c_pkt, (si->ra_mask & 0xff));
+ SET_RA_INFO_RA_MASK1(h2c_pkt, (si->ra_mask & 0xff00) >> 8);
+ SET_RA_INFO_RA_MASK2(h2c_pkt, (si->ra_mask & 0xff0000) >> 16);
+ SET_RA_INFO_RA_MASK3(h2c_pkt, (si->ra_mask & 0xff000000) >> 24);
+
+ si->init_ra_lv = 0;
+ si->updated = true;
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool connect)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_MEDIA_STATUS_RPT);
+ MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, connect);
+ MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, mac_id);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_SET_PWR_MODE);
+
+ SET_PWR_MODE_SET_MODE(h2c_pkt, conf->mode);
+ SET_PWR_MODE_SET_RLBM(h2c_pkt, conf->rlbm);
+ SET_PWR_MODE_SET_SMART_PS(h2c_pkt, conf->smart_ps);
+ SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, conf->awake_interval);
+ SET_PWR_MODE_SET_PORT_ID(h2c_pkt, conf->port_id);
+ SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, conf->state);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+static u8 rtw_get_rsvd_page_location(struct rtw_dev *rtwdev,
+ enum rtw_rsvd_packet_type type)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+ u8 location = 0;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (type == rsvd_pkt->type)
+ location = rsvd_pkt->page;
+ }
+
+ return location;
+}
+
+void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev)
+{
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+ u8 location = 0;
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_RSVD_PAGE);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_PROBE_RESP);
+ *(h2c_pkt + 1) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PROBE_RESP loc: %d\n", location);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_PS_POLL);
+ *(h2c_pkt + 2) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_PS_POLL loc: %d\n", location);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_NULL);
+ *(h2c_pkt + 3) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_NULL loc: %d\n", location);
+
+ location = rtw_get_rsvd_page_location(rtwdev, RSVD_QOS_NULL);
+ *(h2c_pkt + 4) = location;
+ rtw_dbg(rtwdev, RTW_DBG_FW, "RSVD_QOS_NULL loc: %d\n", location);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
+static struct sk_buff *
+rtw_beacon_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb_new;
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_ADHOC &&
+ !ieee80211_vif_is_mesh(vif)) {
+ skb_new = alloc_skb(1, GFP_KERNEL);
+ if (!skb_new)
+ return NULL;
+ skb_put(skb_new, 1);
+ } else {
+ skb_new = ieee80211_beacon_get(hw, vif);
+ }
+
+ return skb_new;
+}
+
+static struct sk_buff *rtw_get_rsvd_page_skb(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum rtw_rsvd_packet_type type)
+{
+ struct sk_buff *skb_new;
+
+ switch (type) {
+ case RSVD_BEACON:
+ skb_new = rtw_beacon_get(hw, vif);
+ break;
+ case RSVD_PS_POLL:
+ skb_new = ieee80211_pspoll_get(hw, vif);
+ break;
+ case RSVD_PROBE_RESP:
+ skb_new = ieee80211_proberesp_get(hw, vif);
+ break;
+ case RSVD_NULL:
+ skb_new = ieee80211_nullfunc_get(hw, vif, false);
+ break;
+ case RSVD_QOS_NULL:
+ skb_new = ieee80211_nullfunc_get(hw, vif, true);
+ break;
+ default:
+ return NULL;
+ }
+
+ if (!skb_new)
+ return NULL;
+
+ return skb_new;
+}
+
+static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_tx_pkt_info pkt_info;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 *pkt_desc;
+
+ memset(&pkt_info, 0, sizeof(pkt_info));
+ rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb);
+ pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
+ memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
+ rtw_tx_fill_tx_desc(&pkt_info, skb);
+}
+
+static inline u8 rtw_len_to_page(unsigned int len, u8 page_size)
+{
+ return DIV_ROUND_UP(len, page_size);
+}
+
+static void rtw_rsvd_page_list_to_buf(struct rtw_dev *rtwdev, u8 page_size,
+ u8 page_margin, u32 page, u8 *buf,
+ struct rtw_rsvd_page *rsvd_pkt)
+{
+ struct sk_buff *skb = rsvd_pkt->skb;
+
+ if (rsvd_pkt->add_txdesc)
+ rtw_fill_rsvd_page_desc(rtwdev, skb);
+
+ if (page >= 1)
+ memcpy(buf + page_margin + page_size * (page - 1),
+ skb->data, skb->len);
+ else
+ memcpy(buf, skb->data, skb->len);
+}
+
+void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
+ bool txdesc)
+{
+ struct rtw_rsvd_page *rsvd_pkt;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type == type)
+ return;
+ }
+
+ rsvd_pkt = kmalloc(sizeof(*rsvd_pkt), GFP_KERNEL);
+ if (!rsvd_pkt)
+ return;
+
+ rsvd_pkt->type = type;
+ rsvd_pkt->add_txdesc = txdesc;
+ list_add_tail(&rsvd_pkt->list, &rtwdev->rsvd_page_list);
+}
+
+void rtw_reset_rsvd_page(struct rtw_dev *rtwdev)
+{
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
+ if (rsvd_pkt->type == RSVD_BEACON)
+ continue;
+ list_del(&rsvd_pkt->list);
+ kfree(rsvd_pkt);
+ }
+}
+
+int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ u8 *buf, u32 size)
+{
+ u8 bckp[2];
+ u8 val;
+ u16 rsvd_pg_head;
+ int ret;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (!size)
+ return -EINVAL;
+
+ pg_addr &= BIT_MASK_BCN_HEAD_1_V1;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, pg_addr | BIT_BCN_VALID_V1);
+
+ val = rtw_read8(rtwdev, REG_CR + 1);
+ bckp[0] = val;
+ val |= BIT_ENSWBCN >> 8;
+ rtw_write8(rtwdev, REG_CR + 1, val);
+
+ val = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL + 2);
+ bckp[1] = val;
+ val &= ~(BIT_EN_BCNQ_DL >> 16);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, val);
+
+ ret = rtw_hci_write_data_rsvd_page(rtwdev, buf, size);
+ if (ret) {
+ rtw_err(rtwdev, "failed to write data to rsvd page\n");
+ goto restore;
+ }
+
+ if (!check_hw_ready(rtwdev, REG_FIFOPAGE_CTRL_2, BIT_BCN_VALID_V1, 1)) {
+ rtw_err(rtwdev, "error beacon valid\n");
+ ret = -EBUSY;
+ }
+
+restore:
+ rsvd_pg_head = rtwdev->fifo.rsvd_boundary;
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2,
+ rsvd_pg_head | BIT_BCN_VALID_V1);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 2, bckp[1]);
+ rtw_write8(rtwdev, REG_CR + 1, bckp[0]);
+
+ return ret;
+}
+
+static int rtw_download_drv_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ u32 pg_size;
+ u32 pg_num = 0;
+ u16 pg_addr = 0;
+
+ pg_size = rtwdev->chip->page_size;
+ pg_num = size / pg_size + ((size & (pg_size - 1)) ? 1 : 0);
+ if (pg_num > rtwdev->fifo.rsvd_drv_pg_num)
+ return -ENOMEM;
+
+ pg_addr = rtwdev->fifo.rsvd_drv_addr;
+
+ return rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
+}
+
+static u8 *rtw_build_rsvd_page(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif, u32 *size)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct sk_buff *iter;
+ struct rtw_rsvd_page *rsvd_pkt;
+ u32 page = 0;
+ u8 total_page = 0;
+ u8 page_size, page_margin, tx_desc_sz;
+ u8 *buf;
+
+ page_size = chip->page_size;
+ tx_desc_sz = chip->tx_pkt_desc_sz;
+ page_margin = page_size - tx_desc_sz;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ iter = rtw_get_rsvd_page_skb(hw, vif, rsvd_pkt->type);
+ if (!iter) {
+ rtw_err(rtwdev, "fail to build rsvd packet\n");
+ goto release_skb;
+ }
+ rsvd_pkt->skb = iter;
+ rsvd_pkt->page = total_page;
+ if (rsvd_pkt->add_txdesc)
+ total_page += rtw_len_to_page(iter->len + tx_desc_sz,
+ page_size);
+ else
+ total_page += rtw_len_to_page(iter->len, page_size);
+ }
+
+ if (total_page > rtwdev->fifo.rsvd_drv_pg_num) {
+ rtw_err(rtwdev, "rsvd page over size: %d\n", total_page);
+ goto release_skb;
+ }
+
+ *size = (total_page - 1) * page_size + page_margin;
+ buf = kzalloc(*size, GFP_KERNEL);
+ if (!buf)
+ goto release_skb;
+
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list) {
+ rtw_rsvd_page_list_to_buf(rtwdev, page_size, page_margin,
+ page, buf, rsvd_pkt);
+ page += rtw_len_to_page(rsvd_pkt->skb->len, page_size);
+ }
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ kfree_skb(rsvd_pkt->skb);
+
+ return buf;
+
+release_skb:
+ list_for_each_entry(rsvd_pkt, &rtwdev->rsvd_page_list, list)
+ kfree_skb(rsvd_pkt->skb);
+
+ return NULL;
+}
+
+static int
+rtw_download_beacon(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = rtw_beacon_get(hw, vif);
+ if (!skb) {
+ rtw_err(rtwdev, "failed to get beacon skb\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rtw_download_drv_rsvd_page(rtwdev, skb->data, skb->len);
+ if (ret)
+ rtw_err(rtwdev, "failed to download drv rsvd page\n");
+
+ dev_kfree_skb(skb);
+
+out:
+ return ret;
+}
+
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ u8 *buf;
+ u32 size;
+ int ret;
+
+ buf = rtw_build_rsvd_page(rtwdev, vif, &size);
+ if (!buf) {
+ rtw_err(rtwdev, "failed to build rsvd page pkt\n");
+ return -ENOMEM;
+ }
+
+ ret = rtw_download_drv_rsvd_page(rtwdev, buf, size);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download drv rsvd page\n");
+ goto free;
+ }
+
+ ret = rtw_download_beacon(rtwdev, vif);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download beacon\n");
+ goto free;
+ }
+
+free:
+ kfree(buf);
+
+ return ret;
+}
+
+int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
+ u32 offset, u32 size, u32 *buf)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ u32 residue, i;
+ u16 start_pg;
+ u16 idx = 0;
+ u16 ctl;
+ u8 rcr;
+
+ if (size & 0x3) {
+ rtw_warn(rtwdev, "should be 4-byte aligned\n");
+ return -EINVAL;
+ }
+
+ offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
+ residue = offset & (FIFO_PAGE_SIZE - 1);
+ start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
+ start_pg += RSVD_PAGE_START_ADDR;
+
+ rcr = rtw_read8(rtwdev, REG_RCR + 2);
+ ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
+
+ /* disable rx clock gate */
+ rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
+
+ do {
+ rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
+
+ for (i = FIFO_DUMP_ADDR + residue;
+ i < FIFO_DUMP_ADDR + FIFO_PAGE_SIZE; i += 4) {
+ buf[idx++] = rtw_read32(rtwdev, i);
+ size -= 4;
+ if (size == 0)
+ goto out;
+ }
+
+ residue = 0;
+ start_pg++;
+ } while (size);
+
+out:
+ rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
+ rtw_write8(rtwdev, REG_RCR + 2, rcr);
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
new file mode 100644
index 000000000000..703466393ecb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -0,0 +1,222 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_FW_H_
+#define __RTW_FW_H_
+
+#define H2C_PKT_SIZE 32
+#define H2C_PKT_HDR_SIZE 8
+
+/* FW bin information */
+#define FW_HDR_SIZE 64
+#define FW_HDR_CHKSUM_SIZE 8
+#define FW_HDR_VERSION 4
+#define FW_HDR_SUBVERSION 6
+#define FW_HDR_SUBINDEX 7
+#define FW_HDR_MONTH 16
+#define FW_HDR_DATE 17
+#define FW_HDR_HOUR 18
+#define FW_HDR_MIN 19
+#define FW_HDR_YEAR 20
+#define FW_HDR_MEM_USAGE 24
+#define FW_HDR_H2C_FMT_VER 28
+#define FW_HDR_DMEM_ADDR 32
+#define FW_HDR_DMEM_SIZE 36
+#define FW_HDR_IMEM_SIZE 48
+#define FW_HDR_EMEM_SIZE 52
+#define FW_HDR_EMEM_ADDR 56
+#define FW_HDR_IMEM_ADDR 60
+
+#define FIFO_PAGE_SIZE_SHIFT 12
+#define FIFO_PAGE_SIZE 4096
+#define RSVD_PAGE_START_ADDR 0x780
+#define FIFO_DUMP_ADDR 0x8000
+
+enum rtw_c2h_cmd_id {
+ C2H_BT_INFO = 0x09,
+ C2H_HW_FEATURE_REPORT = 0x19,
+ C2H_HW_FEATURE_DUMP = 0xfd,
+ C2H_HALMAC = 0xff,
+};
+
+enum rtw_c2h_cmd_id_ext {
+ C2H_CCX_RPT = 0x0f,
+};
+
+struct rtw_c2h_cmd {
+ u8 id;
+ u8 seq;
+ u8 payload[0];
+} __packed;
+
+enum rtw_rsvd_packet_type {
+ RSVD_BEACON,
+ RSVD_PS_POLL,
+ RSVD_PROBE_RESP,
+ RSVD_NULL,
+ RSVD_QOS_NULL,
+};
+
+enum rtw_fw_rf_type {
+ FW_RF_1T2R = 0,
+ FW_RF_2T4R = 1,
+ FW_RF_2T2R = 2,
+ FW_RF_2T3R = 3,
+ FW_RF_1T1R = 4,
+ FW_RF_2T2R_GREEN = 5,
+ FW_RF_3T3R = 6,
+ FW_RF_3T4R = 7,
+ FW_RF_4T4R = 8,
+ FW_RF_MAX_TYPE = 0xF,
+};
+
+struct rtw_iqk_para {
+ u8 clear;
+ u8 segment_iqk;
+};
+
+struct rtw_rsvd_page {
+ struct list_head list;
+ struct sk_buff *skb;
+ enum rtw_rsvd_packet_type type;
+ u8 page;
+ bool add_txdesc;
+};
+
+/* C2H */
+#define GET_CCX_REPORT_SEQNUM(c2h_payload) (c2h_payload[8] & 0xfc)
+#define GET_CCX_REPORT_STATUS(c2h_payload) (c2h_payload[9] & 0xc0)
+
+/* PKT H2C */
+#define H2C_PKT_CMD_ID 0xFF
+#define H2C_PKT_CATEGORY 0x01
+
+#define H2C_PKT_GENERAL_INFO 0x0D
+#define H2C_PKT_PHYDM_INFO 0x11
+#define H2C_PKT_IQK 0x0E
+
+#define SET_PKT_H2C_CATEGORY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(6, 0))
+#define SET_PKT_H2C_CMD_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 16))
+#define SET_PKT_H2C_TOTAL_LEN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 0))
+
+static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
+{
+ SET_PKT_H2C_CATEGORY(h2c_pkt, H2C_PKT_CATEGORY);
+ SET_PKT_H2C_CMD_ID(h2c_pkt, H2C_PKT_CMD_ID);
+ SET_PKT_H2C_SUB_CMD_ID(h2c_pkt, sub_id);
+}
+
+#define FW_OFFLOAD_H2C_SET_SEQ_NUM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 16))
+#define GENERAL_INFO_SET_FW_TX_BOUNDARY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+
+#define PHYDM_INFO_SET_REF_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(7, 0))
+#define PHYDM_INFO_SET_RF_TYPE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(15, 8))
+#define PHYDM_INFO_SET_CUT_VER(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(23, 16))
+#define PHYDM_INFO_SET_RX_ANT_STATUS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(27, 24))
+#define PHYDM_INFO_SET_TX_ANT_STATUS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, GENMASK(31, 28))
+#define IQK_SET_CLEAR(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(0))
+#define IQK_SET_SEGMENT_IQK(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x02, value, BIT(1))
+
+/* Command H2C */
+#define H2C_CMD_RSVD_PAGE 0x0
+#define H2C_CMD_MEDIA_STATUS_RPT 0x01
+#define H2C_CMD_SET_PWR_MODE 0x20
+#define H2C_CMD_RA_INFO 0x40
+#define H2C_CMD_RSSI_MONITOR 0x42
+
+#define SET_H2C_CMD_ID_CLASS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(7, 0))
+
+#define MEDIA_STATUS_RPT_SET_OP_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define MEDIA_STATUS_RPT_SET_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+
+#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
+#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(19, 16))
+#define SET_PWR_MODE_SET_SMART_PS(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 20))
+#define SET_PWR_MODE_SET_AWAKE_INTERVAL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_PWR_MODE_SET_PORT_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 5))
+#define SET_PWR_MODE_SET_PWR_STATE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_RSSI_INFO_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_RSSI_INFO_RSSI(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_RSSI_INFO_STBC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, BIT(1))
+#define SET_RA_INFO_MACID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 8))
+#define SET_RA_INFO_RATE_ID(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(20, 16))
+#define SET_RA_INFO_INIT_RA_LVL(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(22, 21))
+#define SET_RA_INFO_SGI_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(23))
+#define SET_RA_INFO_BW_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(25, 24))
+#define SET_RA_INFO_LDPC(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(26))
+#define SET_RA_INFO_NO_UPDATE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(27))
+#define SET_RA_INFO_VHT_EN(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(29, 28))
+#define SET_RA_INFO_DIS_PT(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(30))
+#define SET_RA_INFO_RA_MASK0(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+#define SET_RA_INFO_RA_MASK1(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(15, 8))
+#define SET_RA_INFO_RA_MASK2(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(23, 16))
+#define SET_RA_INFO_RA_MASK3(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(31, 24))
+
+static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
+{
+ u32 pkt_offset;
+
+ pkt_offset = *((u32 *)skb->cb);
+ return (struct rtw_c2h_cmd *)(skb->data + pkt_offset);
+}
+
+void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_fw_send_general_info(struct rtw_dev *rtwdev);
+void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev);
+
+void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para);
+void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev);
+void rtw_fw_send_rssi_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_fw_send_ra_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+void rtw_fw_media_status_report(struct rtw_dev *rtwdev, u8 mac_id, bool conn);
+void rtw_add_rsvd_page(struct rtw_dev *rtwdev, enum rtw_rsvd_packet_type type,
+ bool txdesc);
+int rtw_fw_write_data_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ u8 *buf, u32 size);
+void rtw_reset_rsvd_page(struct rtw_dev *rtwdev);
+int rtw_fw_download_rsvd_page(struct rtw_dev *rtwdev,
+ struct ieee80211_vif *vif);
+void rtw_send_rsvd_page_h2c(struct rtw_dev *rtwdev);
+int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
+ u32 offset, u32 size, u32 *buf);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/hci.h b/drivers/net/wireless/realtek/rtw88/hci.h
new file mode 100644
index 000000000000..2676582a85a0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/hci.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_HCI_H__
+#define __RTW_HCI_H__
+
+/* ops for PCI, USB and SDIO */
+struct rtw_hci_ops {
+ int (*tx)(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb);
+ int (*setup)(struct rtw_dev *rtwdev);
+ int (*start)(struct rtw_dev *rtwdev);
+ void (*stop)(struct rtw_dev *rtwdev);
+
+ int (*write_data_rsvd_page)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
+ int (*write_data_h2c)(struct rtw_dev *rtwdev, u8 *buf, u32 size);
+
+ u8 (*read8)(struct rtw_dev *rtwdev, u32 addr);
+ u16 (*read16)(struct rtw_dev *rtwdev, u32 addr);
+ u32 (*read32)(struct rtw_dev *rtwdev, u32 addr);
+ void (*write8)(struct rtw_dev *rtwdev, u32 addr, u8 val);
+ void (*write16)(struct rtw_dev *rtwdev, u32 addr, u16 val);
+ void (*write32)(struct rtw_dev *rtwdev, u32 addr, u32 val);
+};
+
+static inline int rtw_hci_tx(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
+{
+ return rtwdev->hci.ops->tx(rtwdev, pkt_info, skb);
+}
+
+static inline int rtw_hci_setup(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.ops->setup(rtwdev);
+}
+
+static inline int rtw_hci_start(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.ops->start(rtwdev);
+}
+
+static inline void rtw_hci_stop(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->stop(rtwdev);
+}
+
+static inline int
+rtw_hci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ return rtwdev->hci.ops->write_data_rsvd_page(rtwdev, buf, size);
+}
+
+static inline int
+rtw_hci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ return rtwdev->hci.ops->write_data_h2c(rtwdev, buf, size);
+}
+
+static inline u8 rtw_read8(struct rtw_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read8(rtwdev, addr);
+}
+
+static inline u16 rtw_read16(struct rtw_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read16(rtwdev, addr);
+}
+
+static inline u32 rtw_read32(struct rtw_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read32(rtwdev, addr);
+}
+
+static inline void rtw_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
+{
+ rtwdev->hci.ops->write8(rtwdev, addr, val);
+}
+
+static inline void rtw_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
+{
+ rtwdev->hci.ops->write16(rtwdev, addr, val);
+}
+
+static inline void rtw_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
+{
+ rtwdev->hci.ops->write32(rtwdev, addr, val);
+}
+
+static inline void rtw_write8_set(struct rtw_dev *rtwdev, u32 addr, u8 bit)
+{
+ u8 val;
+
+ val = rtw_read8(rtwdev, addr);
+ rtw_write8(rtwdev, addr, val | bit);
+}
+
+static inline void rtw_write16_set(struct rtw_dev *rtwdev, u32 addr, u16 bit)
+{
+ u16 val;
+
+ val = rtw_read16(rtwdev, addr);
+ rtw_write16(rtwdev, addr, val | bit);
+}
+
+static inline void rtw_write32_set(struct rtw_dev *rtwdev, u32 addr, u32 bit)
+{
+ u32 val;
+
+ val = rtw_read32(rtwdev, addr);
+ rtw_write32(rtwdev, addr, val | bit);
+}
+
+static inline void rtw_write8_clr(struct rtw_dev *rtwdev, u32 addr, u8 bit)
+{
+ u8 val;
+
+ val = rtw_read8(rtwdev, addr);
+ rtw_write8(rtwdev, addr, val & ~bit);
+}
+
+static inline void rtw_write16_clr(struct rtw_dev *rtwdev, u32 addr, u16 bit)
+{
+ u16 val;
+
+ val = rtw_read16(rtwdev, addr);
+ rtw_write16(rtwdev, addr, val & ~bit);
+}
+
+static inline void rtw_write32_clr(struct rtw_dev *rtwdev, u32 addr, u32 bit)
+{
+ u32 val;
+
+ val = rtw_read32(rtwdev, addr);
+ rtw_write32(rtwdev, addr, val & ~bit);
+}
+
+static inline u32
+rtw_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&rtwdev->rf_lock, flags);
+ val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
+ spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
+
+ return val;
+}
+
+static inline void
+rtw_write_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwdev->rf_lock, flags);
+ rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
+ spin_unlock_irqrestore(&rtwdev->rf_lock, flags);
+}
+
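+/* field helpers: @mask selects a register bit field, the value is
+ * shifted to/from bit 0 of that field
+ */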
+static inline u32
+rtw_read32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw_read32(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline void
+rtw_write32_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 set;
+
+ WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);
+
+ orig = rtw_read32(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw_write32(rtwdev, addr, set);
+}
+
+static inline void
+rtw_write8_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u8 data)
+{
+ u32 shift;
+ u8 orig, set;
+
+ mask &= 0xff;
+ shift = __ffs(mask);
+
+ orig = rtw_read8(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw_write8(rtwdev, addr, set);
+}
+
+static inline enum rtw_hci_type rtw_hci_type(struct rtw_dev *rtwdev)
+{
+ return rtwdev->hci.type;
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
new file mode 100644
index 000000000000..25a923bc6366
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "mac.h"
+#include "reg.h"
+#include "fw.h"
+#include "debug.h"
+
+void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ u8 txsc40 = 0, txsc20 = 0;
+ u32 value32;
+ u8 value8;
+
+ txsc20 = primary_ch_idx;
+ if (txsc20 == 1 || txsc20 == 3)
+ txsc40 = 9;
+ else
+ txsc40 = 10;
+ rtw_write8(rtwdev, REG_DATA_SC,
+ BIT_TXSC_20M(txsc20) | BIT_TXSC_40M(txsc40));
+
+ value32 = rtw_read32(rtwdev, REG_WMAC_TRXPTCL_CTL);
+ value32 &= ~BIT_RFMOD;
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_80:
+ value32 |= BIT_RFMOD_80M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ value32 |= BIT_RFMOD_40M;
+ break;
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ break;
+ }
+ rtw_write32(rtwdev, REG_WMAC_TRXPTCL_CTL, value32);
+
+ value32 = rtw_read32(rtwdev, REG_AFE_CTRL1) & ~(BIT_MAC_CLK_SEL);
+ value32 |= (MAC_CLK_HW_DEF_80M << BIT_SHIFT_MAC_CLK_SEL);
+ rtw_write32(rtwdev, REG_AFE_CTRL1, value32);
+
+ rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
+ rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
+
+ value8 = rtw_read8(rtwdev, REG_CCK_CHECK);
+ value8 = value8 & ~BIT_CHECK_CCK_EN;
+ if (channel > 35)
+ value8 |= BIT_CHECK_CCK_EN;
+ rtw_write8(rtwdev, REG_CCK_CHECK, value8);
+}
+
+static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
+{
+ u32 value32;
+ u8 value8;
+
+ rtw_write8(rtwdev, REG_RSV_CTRL, 0);
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw_write32_set(rtwdev, REG_HCI_OPT_CTRL, BIT_BT_DIG_CLK_EN);
+ break;
+ case RTW_HCI_TYPE_USB:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* config PIN Mux */
+ value32 = rtw_read32(rtwdev, REG_PAD_CTRL1);
+ value32 |= BIT_PAPE_WLBT_SEL | BIT_LNAON_WLBT_SEL;
+ rtw_write32(rtwdev, REG_PAD_CTRL1, value32);
+
+ value32 = rtw_read32(rtwdev, REG_LED_CFG);
+ value32 &= ~(BIT_PAPE_SEL_EN | BIT_LNAON_SEL_EN);
+ rtw_write32(rtwdev, REG_LED_CFG, value32);
+
+ value32 = rtw_read32(rtwdev, REG_GPIO_MUXCFG);
+ value32 |= BIT_WLRFE_4_5_EN;
+ rtw_write32(rtwdev, REG_GPIO_MUXCFG, value32);
+
+ /* disable BB/RF */
+ value8 = rtw_read8(rtwdev, REG_SYS_FUNC_EN);
+ value8 &= ~(BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
+ rtw_write8(rtwdev, REG_SYS_FUNC_EN, value8);
+
+ value8 = rtw_read8(rtwdev, REG_RF_CTRL);
+ value8 &= ~(BIT_RF_SDM_RSTB | BIT_RF_RSTB | BIT_RF_EN);
+ rtw_write8(rtwdev, REG_RF_CTRL, value8);
+
+ value32 = rtw_read32(rtwdev, REG_WLRF1);
+ value32 &= ~BIT_WLRF1_BBRF_EN;
+ rtw_write32(rtwdev, REG_WLRF1, value32);
+
+ return 0;
+}
+
+static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
+ struct rtw_pwr_seq_cmd *cmd)
+{
+ u8 value;
+ u8 flag = 0;
+ u32 offset;
+ u32 cnt = RTW_PWR_POLLING_CNT;
+
+ if (cmd->base == RTW_PWR_ADDR_SDIO)
+ offset = cmd->offset | SDIO_LOCAL_OFFSET;
+ else
+ offset = cmd->offset;
+
+ do {
+ cnt--;
+ value = rtw_read8(rtwdev, offset);
+ value &= cmd->mask;
+ if (value == (cmd->value & cmd->mask))
+ return 0;
+ if (cnt == 0) {
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_PCIE &&
+ flag == 0) {
+ value = rtw_read8(rtwdev, REG_SYS_PW_CTRL);
+ value |= BIT(3);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
+ value &= ~BIT(3);
+ rtw_write8(rtwdev, REG_SYS_PW_CTRL, value);
+ cnt = RTW_PWR_POLLING_CNT;
+ flag = 1;
+ } else {
+ return -EBUSY;
+ }
+ } else {
+ udelay(50);
+ }
+ } while (1);
+}
+
+static int rtw_sub_pwr_seq_parser(struct rtw_dev *rtwdev, u8 intf_mask,
+ u8 cut_mask, struct rtw_pwr_seq_cmd *cmd)
+{
+ struct rtw_pwr_seq_cmd *cur_cmd;
+ u32 offset;
+ u8 value;
+
+ for (cur_cmd = cmd; cur_cmd->cmd != RTW_PWR_CMD_END; cur_cmd++) {
+ if (!(cur_cmd->intf_mask & intf_mask) ||
+ !(cur_cmd->cut_mask & cut_mask))
+ continue;
+
+ switch (cur_cmd->cmd) {
+ case RTW_PWR_CMD_WRITE:
+ offset = cur_cmd->offset;
+
+ if (cur_cmd->base == RTW_PWR_ADDR_SDIO)
+ offset |= SDIO_LOCAL_OFFSET;
+
+ value = rtw_read8(rtwdev, offset);
+ value &= ~cur_cmd->mask;
+ value |= (cur_cmd->value & cur_cmd->mask);
+ rtw_write8(rtwdev, offset, value);
+ break;
+ case RTW_PWR_CMD_POLLING:
+ if (rtw_pwr_cmd_polling(rtwdev, cur_cmd))
+ return -EBUSY;
+ break;
+ case RTW_PWR_CMD_DELAY:
+ if (cur_cmd->value == RTW_PWR_DELAY_US)
+ udelay(cur_cmd->offset);
+ else
+ mdelay(cur_cmd->offset);
+ break;
+ case RTW_PWR_CMD_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
+ struct rtw_pwr_seq_cmd **cmd_seq)
+{
+ u8 cut_mask;
+ u8 intf_mask;
+ u8 cut;
+ u32 idx = 0;
+ struct rtw_pwr_seq_cmd *cmd;
+ int ret;
+
+ cut = rtwdev->hal.cut_version;
+ cut_mask = cut_version_to_mask(cut);
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ intf_mask = BIT(2);
+ break;
+ case RTW_HCI_TYPE_USB:
+ intf_mask = BIT(1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ do {
+ cmd = cmd_seq[idx];
+ if (!cmd)
+ break;
+
+ ret = rtw_sub_pwr_seq_parser(rtwdev, intf_mask, cut_mask, cmd);
+ if (ret)
+ return -EBUSY;
+
+ idx++;
+ } while (1);
+
+ return 0;
+}
+
+static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pwr_seq_cmd **pwr_seq;
+ u8 rpwm;
+ bool cur_pwr;
+
+ rpwm = rtw_read8(rtwdev, rtwdev->hci.rpwm_addr);
+
+ /* Check whether the firmware still exists */
+ if (rtw_read16(rtwdev, REG_MCUFW_CTRL) == 0xC078) {
+ rpwm = (rpwm ^ BIT_RPWM_TOGGLE) & BIT_RPWM_TOGGLE;
+ rtw_write8(rtwdev, rtwdev->hci.rpwm_addr, rpwm);
+ }
+
+ if (rtw_read8(rtwdev, REG_CR) == 0xea)
+ cur_pwr = false;
+ else if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ (rtw_read8(rtwdev, REG_SYS_STATUS1 + 1) & BIT(0)))
+ cur_pwr = false;
+ else
+ cur_pwr = true;
+
+ if (pwr_on && cur_pwr)
+ return -EALREADY;
+
+ pwr_seq = pwr_on ? chip->pwr_on_seq : chip->pwr_off_seq;
+ if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rtw_mac_init_system_cfg(struct rtw_dev *rtwdev)
+{
+ u8 sys_func_en = rtwdev->chip->sys_func_en;
+ u8 value8;
+ u32 value, tmp;
+
+ value = rtw_read32(rtwdev, REG_CPU_DMEM_CON);
+ value |= BIT_WL_PLATFORM_RST | BIT_DDMA_EN;
+ rtw_write32(rtwdev, REG_CPU_DMEM_CON, value);
+
+ rtw_write8(rtwdev, REG_SYS_FUNC_EN + 1, sys_func_en);
+ value8 = (rtw_read8(rtwdev, REG_CR_EXT + 3) & 0xF0) | 0x0C;
+ rtw_write8(rtwdev, REG_CR_EXT + 3, value8);
+
+ /* disable boot-from-flash for driver's DL FW */
+ tmp = rtw_read32(rtwdev, REG_MCUFW_CTRL);
+ if (tmp & BIT_BOOT_FSPI_EN) {
+ rtw_write32(rtwdev, REG_MCUFW_CTRL, tmp & (~BIT_BOOT_FSPI_EN));
+ value = rtw_read32(rtwdev, REG_GPIO_MUXCFG) & (~BIT_FSPI_EN);
+ rtw_write32(rtwdev, REG_GPIO_MUXCFG, value);
+ }
+
+ return 0;
+}
+
+int rtw_mac_power_on(struct rtw_dev *rtwdev)
+{
+ int ret = 0;
+
+ ret = rtw_mac_pre_system_cfg(rtwdev);
+ if (ret)
+ goto err;
+
+ ret = rtw_mac_power_switch(rtwdev, true);
+ if (ret)
+ goto err;
+
+ ret = rtw_mac_init_system_cfg(rtwdev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ rtw_err(rtwdev, "mac power on failed");
+ return ret;
+}
+
+void rtw_mac_power_off(struct rtw_dev *rtwdev)
+{
+ rtw_mac_power_switch(rtwdev, false);
+}
+
+static bool check_firmware_size(const u8 *data, u32 size)
+{
+ u32 dmem_size;
+ u32 imem_size;
+ u32 emem_size;
+ u32 real_size;
+
+ dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
+ imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
+ emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
+ le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+
+ dmem_size += FW_HDR_CHKSUM_SIZE;
+ imem_size += FW_HDR_CHKSUM_SIZE;
+ emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
+ real_size = FW_HDR_SIZE + dmem_size + imem_size + emem_size;
+ if (real_size != size)
+ return false;
+
+ return true;
+}
+
+static void wlan_cpu_enable(struct rtw_dev *rtwdev, bool enable)
+{
+ if (enable) {
+ /* cpu io interface enable */
+ rtw_write8_set(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
+
+ /* cpu enable */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
+ } else {
+ /* cpu disable */
+ rtw_write8_clr(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
+
+ /* cpu io interface disable */
+ rtw_write8_clr(rtwdev, REG_RSV_CTRL + 1, BIT_WLMCU_IOIF);
+ }
+}
+
+#define DLFW_RESTORE_REG_NUM 6
+
+static void download_firmware_reg_backup(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp)
+{
+ u8 tmp;
+ u8 bckp_idx = 0;
+
+ /* set HIQ to hi priority */
+ bckp[bckp_idx].len = 1;
+ bckp[bckp_idx].reg = REG_TXDMA_PQ_MAP + 1;
+ bckp[bckp_idx].val = rtw_read8(rtwdev, REG_TXDMA_PQ_MAP + 1);
+ bckp_idx++;
+ tmp = RTW_DMA_MAPPING_HIGH << 6;
+ rtw_write8(rtwdev, REG_TXDMA_PQ_MAP + 1, tmp);
+
+ /* DLFW only uses HIQ, map HIQ to hi priority */
+ bckp[bckp_idx].len = 1;
+ bckp[bckp_idx].reg = REG_CR;
+ bckp[bckp_idx].val = rtw_read8(rtwdev, REG_CR);
+ bckp_idx++;
+ bckp[bckp_idx].len = 4;
+ bckp[bckp_idx].reg = REG_H2CQ_CSR;
+ bckp[bckp_idx].val = BIT_H2CQ_FULL;
+ bckp_idx++;
+ tmp = BIT_HCI_TXDMA_EN | BIT_TXDMA_EN;
+ rtw_write8(rtwdev, REG_CR, tmp);
+ rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
+
+ /* Config hi priority queue and public priority queue page number */
+ bckp[bckp_idx].len = 2;
+ bckp[bckp_idx].reg = REG_FIFOPAGE_INFO_1;
+ bckp[bckp_idx].val = rtw_read16(rtwdev, REG_FIFOPAGE_INFO_1);
+ bckp_idx++;
+ bckp[bckp_idx].len = 4;
+ bckp[bckp_idx].reg = REG_RQPN_CTRL_2;
+ bckp[bckp_idx].val = rtw_read32(rtwdev, REG_RQPN_CTRL_2) | BIT_LD_RQPN;
+ bckp_idx++;
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, 0x200);
+ rtw_write32(rtwdev, REG_RQPN_CTRL_2, bckp[bckp_idx - 1].val);
+
+ /* Disable beacon related functions */
+ tmp = rtw_read8(rtwdev, REG_BCN_CTRL);
+ bckp[bckp_idx].len = 1;
+ bckp[bckp_idx].reg = REG_BCN_CTRL;
+ bckp[bckp_idx].val = tmp;
+ bckp_idx++;
+ tmp = (u8)((tmp & (~BIT_EN_BCN_FUNCTION)) | BIT_DIS_TSF_UDT);
+ rtw_write8(rtwdev, REG_BCN_CTRL, tmp);
+
+ WARN(bckp_idx != DLFW_RESTORE_REG_NUM, "wrong backup number\n");
+}
+
+static void download_firmware_reset_platform(struct rtw_dev *rtwdev)
+{
+ rtw_write8_clr(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
+ rtw_write8_clr(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
+ rtw_write8_set(rtwdev, REG_CPU_DMEM_CON + 2, BIT_WL_PLATFORM_RST >> 16);
+ rtw_write8_set(rtwdev, REG_SYS_CLK_CTRL + 1, BIT_CPU_CLK_EN >> 8);
+}
+
+static void download_firmware_reg_restore(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp,
+ u8 bckp_num)
+{
+ rtw_restore_reg(rtwdev, bckp, bckp_num);
+}
+
+#define TX_DESC_SIZE 48
+
+static int send_firmware_pkt_rsvd_page(struct rtw_dev *rtwdev, u16 pg_addr,
+ const u8 *data, u32 size)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = rtw_fw_write_data_rsvd_page(rtwdev, pg_addr, buf, size);
+ kfree(buf);
+ return ret;
+}
+
+static int
+send_firmware_pkt(struct rtw_dev *rtwdev, u16 pg_addr, const u8 *data, u32 size)
+{
+ int ret;
+
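+ /* on USB, avoid a transfer whose total length (payload plus TX
+ * descriptor) is a multiple of 512 bytes
+ */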
+ if (rtw_hci_type(rtwdev) == RTW_HCI_TYPE_USB &&
+ !((size + TX_DESC_SIZE) & (512 - 1)))
+ size += 1;
+
+ ret = send_firmware_pkt_rsvd_page(rtwdev, pg_addr, data, size);
+ if (ret)
+ rtw_err(rtwdev, "failed to download rsvd page\n");
+
+ return ret;
+}
+
+static int
+iddma_enable(struct rtw_dev *rtwdev, u32 src, u32 dst, u32 ctrl)
+{
+ rtw_write32(rtwdev, REG_DDMA_CH0SA, src);
+ rtw_write32(rtwdev, REG_DDMA_CH0DA, dst);
+ rtw_write32(rtwdev, REG_DDMA_CH0CTRL, ctrl);
+
+ if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int iddma_download_firmware(struct rtw_dev *rtwdev, u32 src, u32 dst,
+ u32 len, u8 first)
+{
+ u32 ch0_ctrl = BIT_DDMACH0_CHKSUM_EN | BIT_DDMACH0_OWN;
+
+ if (!check_hw_ready(rtwdev, REG_DDMA_CH0CTRL, BIT_DDMACH0_OWN, 0))
+ return -EBUSY;
+
+ ch0_ctrl |= len & BIT_MASK_DDMACH0_DLEN;
+ if (!first)
+ ch0_ctrl |= BIT_DDMACH0_CHKSUM_CONT;
+
+ if (iddma_enable(rtwdev, src, dst, ch0_ctrl))
+ return -EBUSY;
+
+ return 0;
+}
+
+static bool
+check_fw_checksum(struct rtw_dev *rtwdev, u32 addr)
+{
+ u8 fw_ctrl;
+
+ fw_ctrl = rtw_read8(rtwdev, REG_MCUFW_CTRL);
+
+ if (rtw_read32(rtwdev, REG_DDMA_CH0CTRL) & BIT_DDMACH0_CHKSUM_STS) {
+ if (addr < OCPBASE_DMEM_88XX) {
+ fw_ctrl |= BIT_IMEM_DW_OK;
+ fw_ctrl &= ~BIT_IMEM_CHKSUM_OK;
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ } else {
+ fw_ctrl |= BIT_DMEM_DW_OK;
+ fw_ctrl &= ~BIT_DMEM_CHKSUM_OK;
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ }
+
+ rtw_err(rtwdev, "invalid fw checksum\n");
+
+ return false;
+ }
+
+ if (addr < OCPBASE_DMEM_88XX) {
+ fw_ctrl |= (BIT_IMEM_DW_OK | BIT_IMEM_CHKSUM_OK);
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ } else {
+ fw_ctrl |= (BIT_DMEM_DW_OK | BIT_DMEM_CHKSUM_OK);
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+ }
+
+ return true;
+}
+
+static int
+download_firmware_to_mem(struct rtw_dev *rtwdev, const u8 *data,
+ u32 src, u32 dst, u32 size)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 desc_size = chip->tx_pkt_desc_sz;
+ u8 first_part;
+ u32 mem_offset;
+ u32 residue_size;
+ u32 pkt_size;
+ u32 max_size = 0x1000;
+ u32 val;
+ int ret;
+
+ mem_offset = 0;
+ first_part = 1;
+ residue_size = size;
+
+ val = rtw_read32(rtwdev, REG_DDMA_CH0CTRL);
+ val |= BIT_DDMACH0_RESET_CHKSUM_STS;
+ rtw_write32(rtwdev, REG_DDMA_CH0CTRL, val);
+
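+ /* download the image in chunks of at most 4K: write each chunk to
+ * the reserved page area, then let DDMA copy it from the TX buffer
+ * into firmware memory
+ */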
+ while (residue_size) {
+ if (residue_size >= max_size)
+ pkt_size = max_size;
+ else
+ pkt_size = residue_size;
+
+ ret = send_firmware_pkt(rtwdev, (u16)(src >> 7),
+ data + mem_offset, pkt_size);
+ if (ret)
+ return ret;
+
+ ret = iddma_download_firmware(rtwdev, OCPBASE_TXBUF_88XX +
+ src + desc_size,
+ dst + mem_offset, pkt_size,
+ first_part);
+ if (ret)
+ return ret;
+
+ first_part = 0;
+ mem_offset += pkt_size;
+ residue_size -= pkt_size;
+ }
+
+ if (!check_fw_checksum(rtwdev, dst))
+ return -EINVAL;
+
+ return 0;
+}
+
+static void update_firmware_info(struct rtw_dev *rtwdev,
+ struct rtw_fw_state *fw)
+{
+ const u8 *data = fw->firmware->data;
+
+ fw->h2c_version =
+ le16_to_cpu(*((__le16 *)(data + FW_HDR_H2C_FMT_VER)));
+ fw->version =
+ le16_to_cpu(*((__le16 *)(data + FW_HDR_VERSION)));
+ fw->sub_version = *(data + FW_HDR_SUBVERSION);
+ fw->sub_index = *(data + FW_HDR_SUBINDEX);
+
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw h2c version: %x\n", fw->h2c_version);
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw version: %x\n", fw->version);
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub version: %x\n", fw->sub_version);
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw sub index: %x\n", fw->sub_index);
+}
+
+static int
+start_download_firmware(struct rtw_dev *rtwdev, const u8 *data, u32 size)
+{
+ const u8 *cur_fw;
+ u16 val;
+ u32 imem_size;
+ u32 dmem_size;
+ u32 emem_size;
+ u32 addr;
+ int ret;
+
+ dmem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_SIZE)));
+ imem_size = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_SIZE)));
+ emem_size = ((*(data + FW_HDR_MEM_USAGE)) & BIT(4)) ?
+ le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_SIZE))) : 0;
+ dmem_size += FW_HDR_CHKSUM_SIZE;
+ imem_size += FW_HDR_CHKSUM_SIZE;
+ emem_size += emem_size ? FW_HDR_CHKSUM_SIZE : 0;
+
+ val = (u16)(rtw_read16(rtwdev, REG_MCUFW_CTRL) & 0x3800);
+ val |= BIT_MCUFWDL_EN;
+ rtw_write16(rtwdev, REG_MCUFW_CTRL, val);
+
+ cur_fw = data + FW_HDR_SIZE;
+ addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_DMEM_ADDR)));
+ addr &= ~BIT(31);
+ ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, dmem_size);
+ if (ret)
+ return ret;
+
+ cur_fw = data + FW_HDR_SIZE + dmem_size;
+ addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_IMEM_ADDR)));
+ addr &= ~BIT(31);
+ ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr, imem_size);
+ if (ret)
+ return ret;
+
+ if (emem_size) {
+ cur_fw = data + FW_HDR_SIZE + dmem_size + imem_size;
+ addr = le32_to_cpu(*((__le32 *)(data + FW_HDR_EMEM_ADDR)));
+ addr &= ~BIT(31);
+ ret = download_firmware_to_mem(rtwdev, cur_fw, 0, addr,
+ emem_size);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int download_firmware_validate(struct rtw_dev *rtwdev)
+{
+ u32 fw_key;
+
+ if (!check_hw_ready(rtwdev, REG_MCUFW_CTRL, FW_READY_MASK, FW_READY)) {
+ fw_key = rtw_read32(rtwdev, REG_FW_DBG7) & FW_KEY_MASK;
+ if (fw_key == ILLEGAL_KEY_GROUP)
+ rtw_err(rtwdev, "invalid fw key\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void download_firmware_end_flow(struct rtw_dev *rtwdev)
+{
+ u16 fw_ctrl;
+
+ rtw_write32(rtwdev, REG_TXDMA_STATUS, BTI_PAGE_OVF);
+
+ /* Check whether the IMEM & DMEM checksums are OK */
+ fw_ctrl = rtw_read16(rtwdev, REG_MCUFW_CTRL);
+ if ((fw_ctrl & BIT_CHECK_SUM_OK) != BIT_CHECK_SUM_OK)
+ return;
+
+ fw_ctrl = (fw_ctrl | BIT_FW_DW_RDY) & ~BIT_MCUFWDL_EN;
+ rtw_write16(rtwdev, REG_MCUFW_CTRL, fw_ctrl);
+}
+
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
+{
+ struct rtw_backup_info bckp[DLFW_RESTORE_REG_NUM];
+ const u8 *data = fw->firmware->data;
+ u32 size = fw->firmware->size;
+ u32 ltecoex_bckp;
+ int ret;
+
+ if (!check_firmware_size(data, size))
+ return -EINVAL;
+
+ if (!ltecoex_read_reg(rtwdev, 0x38, &ltecoex_bckp))
+ return -EBUSY;
+
+ wlan_cpu_enable(rtwdev, false);
+
+ download_firmware_reg_backup(rtwdev, bckp);
+ download_firmware_reset_platform(rtwdev);
+
+ ret = start_download_firmware(rtwdev, data, size);
+ if (ret)
+ goto dlfw_fail;
+
+ download_firmware_reg_restore(rtwdev, bckp, DLFW_RESTORE_REG_NUM);
+
+ download_firmware_end_flow(rtwdev);
+
+ wlan_cpu_enable(rtwdev, true);
+
+ if (!ltecoex_reg_write(rtwdev, 0x38, ltecoex_bckp))
+ return -EBUSY;
+
+ ret = download_firmware_validate(rtwdev);
+ if (ret)
+ goto dlfw_fail;
+
+ update_firmware_info(rtwdev, fw);
+
+ /* reset desc and index */
+ rtw_hci_setup(rtwdev);
+
+ rtwdev->h2c.last_box_num = 0;
+ rtwdev->h2c.seq = 0;
+
+ rtw_fw_send_general_info(rtwdev);
+ rtw_fw_send_phydm_info(rtwdev);
+
+ rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
+
+ return 0;
+
+dlfw_fail:
+ /* Disable FWDL_EN */
+ rtw_write8_clr(rtwdev, REG_MCUFW_CTRL, BIT_MCUFWDL_EN);
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN + 1, BIT_FEN_CPUEN);
+
+ return ret;
+}
+
+static int txdma_queue_mapping(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_rqpn *rqpn = NULL;
+ u16 txdma_pq_map = 0;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rqpn = &chip->rqpn_table[1];
+ break;
+ case RTW_HCI_TYPE_USB:
+ if (rtwdev->hci.bulkout_num == 2)
+ rqpn = &chip->rqpn_table[2];
+ else if (rtwdev->hci.bulkout_num == 3)
+ rqpn = &chip->rqpn_table[3];
+ else if (rtwdev->hci.bulkout_num == 4)
+ rqpn = &chip->rqpn_table[4];
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ txdma_pq_map |= BIT_TXDMA_HIQ_MAP(rqpn->dma_map_hi);
+ txdma_pq_map |= BIT_TXDMA_MGQ_MAP(rqpn->dma_map_mg);
+ txdma_pq_map |= BIT_TXDMA_BKQ_MAP(rqpn->dma_map_bk);
+ txdma_pq_map |= BIT_TXDMA_BEQ_MAP(rqpn->dma_map_be);
+ txdma_pq_map |= BIT_TXDMA_VIQ_MAP(rqpn->dma_map_vi);
+ txdma_pq_map |= BIT_TXDMA_VOQ_MAP(rqpn->dma_map_vo);
+ rtw_write16(rtwdev, REG_TXDMA_PQ_MAP, txdma_pq_map);
+
+ rtw_write8(rtwdev, REG_CR, 0);
+ rtw_write8(rtwdev, REG_CR, MAC_TRX_ENABLE);
+ rtw_write32(rtwdev, REG_H2CQ_CSR, BIT_H2CQ_FULL);
+
+ return 0;
+}
+
+static int set_trx_fifo_info(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u16 cur_pg_addr;
+ u8 csi_buf_pg_num = chip->csi_buf_pg_num;
+
+ /* config rsvd page num */
+ fifo->rsvd_drv_pg_num = 8;
+ fifo->txff_pg_num = chip->txff_size >> 7;
+ fifo->rsvd_pg_num = fifo->rsvd_drv_pg_num +
+ RSVD_PG_H2C_EXTRAINFO_NUM +
+ RSVD_PG_H2C_STATICINFO_NUM +
+ RSVD_PG_H2CQ_NUM +
+ RSVD_PG_CPU_INSTRUCTION_NUM +
+ RSVD_PG_FW_TXBUF_NUM +
+ csi_buf_pg_num;
+
+ if (fifo->rsvd_pg_num > fifo->txff_pg_num)
+ return -ENOMEM;
+
+ fifo->acq_pg_num = fifo->txff_pg_num - fifo->rsvd_pg_num;
+ fifo->rsvd_boundary = fifo->txff_pg_num - fifo->rsvd_pg_num;
+
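+ /* carve the reserved pages from the top of the TX FIFO, working downward */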
+ cur_pg_addr = fifo->txff_pg_num;
+ cur_pg_addr -= csi_buf_pg_num;
+ fifo->rsvd_csibuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_FW_TXBUF_NUM;
+ fifo->rsvd_fw_txbuf_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_CPU_INSTRUCTION_NUM;
+ fifo->rsvd_cpu_instr_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2CQ_NUM;
+ fifo->rsvd_h2cq_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_STATICINFO_NUM;
+ fifo->rsvd_h2c_sta_info_addr = cur_pg_addr;
+ cur_pg_addr -= RSVD_PG_H2C_EXTRAINFO_NUM;
+ fifo->rsvd_h2c_info_addr = cur_pg_addr;
+ cur_pg_addr -= fifo->rsvd_drv_pg_num;
+ fifo->rsvd_drv_addr = cur_pg_addr;
+
+ if (fifo->rsvd_boundary != fifo->rsvd_drv_addr) {
+ rtw_err(rtwdev, "wrong rsvd driver address\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int priority_queue_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_page_table *pg_tbl = NULL;
+ u16 pubq_num;
+ int ret;
+
+ ret = set_trx_fifo_info(rtwdev);
+ if (ret)
+ return ret;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ pg_tbl = &chip->page_table[1];
+ break;
+ case RTW_HCI_TYPE_USB:
+ if (rtwdev->hci.bulkout_num == 2)
+ pg_tbl = &chip->page_table[2];
+ else if (rtwdev->hci.bulkout_num == 3)
+ pg_tbl = &chip->page_table[3];
+ else if (rtwdev->hci.bulkout_num == 4)
+ pg_tbl = &chip->page_table[4];
+ else
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ pubq_num = fifo->acq_pg_num - pg_tbl->hq_num - pg_tbl->lq_num -
+ pg_tbl->nq_num - pg_tbl->exq_num - pg_tbl->gapq_num;
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_1, pg_tbl->hq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_2, pg_tbl->lq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_3, pg_tbl->nq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_4, pg_tbl->exq_num);
+ rtw_write16(rtwdev, REG_FIFOPAGE_INFO_5, pubq_num);
+ rtw_write32_set(rtwdev, REG_RQPN_CTRL_2, BIT_LD_RQPN);
+
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2, fifo->rsvd_boundary);
+ rtw_write8_set(rtwdev, REG_FWHW_TXQ_CTRL + 2, BIT_EN_WR_FREE_TAIL >> 16);
+
+ rtw_write16(rtwdev, REG_BCNQ_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
+ rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
+ rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+ rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
+
+ if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
+ return -EBUSY;
+
+ rtw_write8(rtwdev, REG_CR + 3, 0);
+
+ return 0;
+}
+
+static int init_h2c(struct rtw_dev *rtwdev)
+{
+ struct rtw_fifo_conf *fifo = &rtwdev->fifo;
+ u8 value8;
+ u32 value32;
+ u32 h2cq_addr;
+ u32 h2cq_size;
+ u32 h2cq_free;
+ u32 wp, rp;
+
+ h2cq_addr = fifo->rsvd_h2cq_addr << TX_PAGE_SIZE_SHIFT;
+ h2cq_size = RSVD_PG_H2CQ_NUM << TX_PAGE_SIZE_SHIFT;
+
+ value32 = rtw_read32(rtwdev, REG_H2C_HEAD);
+ value32 = (value32 & 0xFFFC0000) | h2cq_addr;
+ rtw_write32(rtwdev, REG_H2C_HEAD, value32);
+
+ value32 = rtw_read32(rtwdev, REG_H2C_READ_ADDR);
+ value32 = (value32 & 0xFFFC0000) | h2cq_addr;
+ rtw_write32(rtwdev, REG_H2C_READ_ADDR, value32);
+
+ value32 = rtw_read32(rtwdev, REG_H2C_TAIL);
+ value32 &= 0xFFFC0000;
+ value32 |= (h2cq_addr + h2cq_size);
+ rtw_write32(rtwdev, REG_H2C_TAIL, value32);
+
+ value8 = rtw_read8(rtwdev, REG_H2C_INFO);
+ value8 = (u8)((value8 & 0xFC) | 0x01);
+ rtw_write8(rtwdev, REG_H2C_INFO, value8);
+
+ value8 = rtw_read8(rtwdev, REG_H2C_INFO);
+ value8 = (u8)((value8 & 0xFB) | 0x04);
+ rtw_write8(rtwdev, REG_H2C_INFO, value8);
+
+ value8 = rtw_read8(rtwdev, REG_TXDMA_OFFSET_CHK + 1);
+ value8 = (u8)((value8 & 0x7f) | 0x80);
+ rtw_write8(rtwdev, REG_TXDMA_OFFSET_CHK + 1, value8);
+
+ wp = rtw_read32(rtwdev, REG_H2C_PKT_WRITEADDR) & 0x3FFFF;
+ rp = rtw_read32(rtwdev, REG_H2C_PKT_READADDR) & 0x3FFFF;
+ h2cq_free = wp >= rp ? h2cq_size - (wp - rp) : rp - wp;
+
+ if (h2cq_size != h2cq_free) {
+ rtw_err(rtwdev, "H2C queue mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw_init_trx_cfg(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = txdma_queue_mapping(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = priority_queue_cfg(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = init_h2c(rtwdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw_drv_info_cfg(struct rtw_dev *rtwdev)
+{
+ u8 value8;
+
+ rtw_write8(rtwdev, REG_RX_DRVINFO_SZ, PHY_STATUS_SIZE);
+ value8 = rtw_read8(rtwdev, REG_TRXFF_BNDY + 1);
+ value8 &= 0xF0;
+ /* For rxdesc len = 0 issue */
+ value8 |= 0xF;
+ rtw_write8(rtwdev, REG_TRXFF_BNDY + 1, value8);
+ rtw_write32_set(rtwdev, REG_RCR, BIT_APP_PHYSTS);
+ rtw_write32_clr(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, BIT(8) | BIT(9));
+
+ return 0;
+}
+
+int rtw_mac_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ int ret;
+
+ ret = rtw_init_trx_cfg(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = chip->ops->mac_init(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw_drv_info_cfg(rtwdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/mac.h b/drivers/net/wireless/realtek/rtw88/mac.h
new file mode 100644
index 000000000000..efe6f731f240
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/mac.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_MAC_H__
+#define __RTW_MAC_H__
+
+#define RTW_HW_PORT_NUM 5
+#define cut_version_to_mask(cut) (0x1 << ((cut) + 1))
+#define SDIO_LOCAL_OFFSET 0x10250000
+#define DDMA_POLLING_COUNT 1000
+#define C2H_PKT_BUF 256
+#define PHY_STATUS_SIZE 4
+#define ILLEGAL_KEY_GROUP 0xFAAAAA00
+
+/* HW memory address */
+#define OCPBASE_TXBUF_88XX 0x18780000
+#define OCPBASE_DMEM_88XX 0x00200000
+#define OCPBASE_EMEM_88XX 0x00100000
+
+#define RSVD_PG_DRV_NUM 16
+#define RSVD_PG_H2C_EXTRAINFO_NUM 24
+#define RSVD_PG_H2C_STATICINFO_NUM 8
+#define RSVD_PG_H2CQ_NUM 8
+#define RSVD_PG_CPU_INSTRUCTION_NUM 0
+#define RSVD_PG_FW_TXBUF_NUM 4
+
+void rtw_set_channel_mac(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx);
+int rtw_mac_power_on(struct rtw_dev *rtwdev);
+void rtw_mac_power_off(struct rtw_dev *rtwdev);
+int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw);
+int rtw_mac_init(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
new file mode 100644
index 000000000000..abded63f138d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -0,0 +1,481 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "sec.h"
+#include "tx.h"
+#include "fw.h"
+#include "mac.h"
+#include "ps.h"
+#include "reg.h"
+#include "debug.h"
+
+static void rtw_ops_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_tx_pkt_info pkt_info = {0};
+
+ if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
+ goto out;
+
+ rtw_tx_pkt_info_update(rtwdev, &pkt_info, control, skb);
+ if (rtw_hci_tx(rtwdev, &pkt_info, skb))
+ goto out;
+
+ return;
+
+out:
+ ieee80211_free_txskb(hw, skb);
+}
+
+static int rtw_ops_start(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw_core_start(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw_ops_stop(struct ieee80211_hw *hw)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_core_stop(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE) {
+ rtw_enter_ips(rtwdev);
+ } else {
+ ret = rtw_leave_ips(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave idle state\n");
+ goto out;
+ }
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ rtw_set_channel(rtwdev);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
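+/* hardware register offsets (and masks) used to configure each port's
+ * MAC address, BSSID, network type and AID
+ */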
+static const struct rtw_vif_port rtw_vif_port[] = {
+ [0] = {
+ .mac_addr = {.addr = 0x0610},
+ .bssid = {.addr = 0x0618},
+ .net_type = {.addr = 0x0100, .mask = 0x30000},
+ .aid = {.addr = 0x06a8, .mask = 0x7ff},
+ },
+ [1] = {
+ .mac_addr = {.addr = 0x0700},
+ .bssid = {.addr = 0x0708},
+ .net_type = {.addr = 0x0100, .mask = 0xc0000},
+ .aid = {.addr = 0x0710, .mask = 0x7ff},
+ },
+ [2] = {
+ .mac_addr = {.addr = 0x1620},
+ .bssid = {.addr = 0x1628},
+ .net_type = {.addr = 0x1100, .mask = 0x3},
+ .aid = {.addr = 0x1600, .mask = 0x7ff},
+ },
+ [3] = {
+ .mac_addr = {.addr = 0x1630},
+ .bssid = {.addr = 0x1638},
+ .net_type = {.addr = 0x1100, .mask = 0xc},
+ .aid = {.addr = 0x1604, .mask = 0x7ff},
+ },
+ [4] = {
+ .mac_addr = {.addr = 0x1640},
+ .bssid = {.addr = 0x1648},
+ .net_type = {.addr = 0x1100, .mask = 0x30},
+ .aid = {.addr = 0x1608, .mask = 0x7ff},
+ },
+};
+
+static int rtw_ops_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ enum rtw_net_type net_type;
+ u32 config = 0;
+ u8 port = 0;
+
+ rtwvif->port = port;
+ rtwvif->vif = vif;
+ rtwvif->stats.tx_unicast = 0;
+ rtwvif->stats.rx_unicast = 0;
+ rtwvif->stats.tx_cnt = 0;
+ rtwvif->stats.rx_cnt = 0;
+ rtwvif->in_lps = false;
+ rtwvif->conf = &rtw_vif_port[port];
+
+ mutex_lock(&rtwdev->mutex);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ net_type = RTW_NET_AP_MODE;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ net_type = RTW_NET_AD_HOC;
+ break;
+ case NL80211_IFTYPE_STATION:
+ default:
+ net_type = RTW_NET_NO_LINK;
+ break;
+ }
+
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtwvif->net_type = net_type;
+ config |= PORT_SET_NET_TYPE;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ rtw_info(rtwdev, "start vif %pM on port %d\n", vif->addr, rtwvif->port);
+ return 0;
+}
+
+static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
+
+ rtw_info(rtwdev, "stop vif %pM on port %d\n", vif->addr, rtwvif->port);
+
+ mutex_lock(&rtwdev->mutex);
+
+ eth_zero_addr(rtwvif->mac_addr);
+ config |= PORT_SET_MAC_ADDR;
+ rtwvif->net_type = RTW_NET_NO_LINK;
+ config |= PORT_SET_NET_TYPE;
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw_ops_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ u64 multicast)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ *new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
+ FIF_BCN_PRBRESP_PROMISC;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI)
+ rtwdev->hal.rcr |= BIT_AM | BIT_AB;
+ else
+ rtwdev->hal.rcr &= ~(BIT_AM | BIT_AB);
+ }
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL)
+ rtwdev->hal.rcr |= BIT_ACRC32;
+ else
+ rtwdev->hal.rcr &= ~(BIT_ACRC32);
+ }
+ if (changed_flags & FIF_OTHER_BSS) {
+ if (*new_flags & FIF_OTHER_BSS)
+ rtwdev->hal.rcr |= BIT_AAP;
+ else
+ rtwdev->hal.rcr &= ~(BIT_AAP);
+ }
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
+ rtwdev->hal.rcr &= ~(BIT_CBSSID_BCN | BIT_CBSSID_DATA);
+ else
+ rtwdev->hal.rcr |= BIT_CBSSID_BCN;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RX,
+ "config rx filter, changed=0x%08x, new=0x%08x, rcr=0x%08x\n",
+ changed_flags, *new_flags, rtwdev->hal.rcr);
+
+ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changed)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ struct rtw_chip_info *chip = rtwdev->chip;
+ enum rtw_net_type net_type;
+
+ if (conf->assoc) {
+ net_type = RTW_NET_MGD_LINKED;
+ chip->ops->do_iqk(rtwdev);
+
+ rtwvif->aid = conf->aid;
+ rtw_add_rsvd_page(rtwdev, RSVD_PS_POLL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_QOS_NULL, true);
+ rtw_add_rsvd_page(rtwdev, RSVD_NULL, true);
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+ rtw_send_rsvd_page_h2c(rtwdev);
+ } else {
+ net_type = RTW_NET_NO_LINK;
+ rtwvif->aid = 0;
+ rtw_reset_rsvd_page(rtwdev);
+ }
+
+ rtwvif->net_type = net_type;
+ config |= PORT_SET_NET_TYPE;
+ config |= PORT_SET_AID;
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ ether_addr_copy(rtwvif->bssid, conf->bssid);
+ config |= PORT_SET_BSSID;
+ }
+
+ if (changed & BSS_CHANGED_BEACON)
+ rtw_fw_download_rsvd_page(rtwdev, vif);
+
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
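+/* returns RTW_MAX_MAC_ID_NUM when no free mac_id is available */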
+static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
+static void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
+{
+ clear_bit(mac_id, rtwdev->mac_id_map);
+}
+
+static int rtw_ops_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ si->sta = sta;
+ si->vif = vif;
+ si->init_ra_lv = 1;
+ ewma_rssi_init(&si->avg_rssi);
+
+ rtw_update_sta_info(rtwdev, si);
+ rtw_fw_media_status_report(rtwdev, si->mac_id, true);
+
+ rtwdev->sta_cnt++;
+
+ rtw_info(rtwdev, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
+static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+
+ rtw_release_macid(rtwdev, si->mac_id);
+ rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+
+ rtwdev->sta_cnt--;
+
+ rtw_info(rtwdev, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
+
+ mutex_unlock(&rtwdev->mutex);
+ return 0;
+}
+
+static int rtw_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u8 hw_key_type;
+ u8 hw_key_idx;
+ int ret = 0;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ hw_key_type = RTW_CAM_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ hw_key_type = RTW_CAM_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ hw_key_type = RTW_CAM_TKIP;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hw_key_type = RTW_CAM_AES;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ /* suppress error messages */
+ return -EOPNOTSUPP;
+ default:
+ return -ENOTSUPP;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ hw_key_idx = rtw_sec_get_free_cam(sec);
+ } else {
+ /* multiple interfaces? */
+ hw_key_idx = key->keyidx;
+ }
+
+ if (hw_key_idx > sec->total_cam_num) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ /* need sw generated IV */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ key->hw_key_idx = hw_key_idx;
+ rtw_sec_write_cam(rtwdev, sec, sta, key,
+ hw_key_type, hw_key_idx);
+ break;
+ case DISABLE_KEY:
+ rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
+ break;
+ }
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static int rtw_ops_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ieee80211_sta *sta = params->sta;
+ u16 tid = params->tid;
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw_ops_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ rtw_leave_lps(rtwdev, rtwvif);
+
+ rtw_flag_set(rtwdev, RTW_FLAG_DIG_DISABLE);
+ rtw_flag_set(rtwdev, RTW_FLAG_SCANNING);
+}
+
+static void rtw_ops_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ rtw_flag_clear(rtwdev, RTW_FLAG_SCANNING);
+ rtw_flag_clear(rtwdev, RTW_FLAG_DIG_DISABLE);
+}
+
+const struct ieee80211_ops rtw_ops = {
+ .tx = rtw_ops_tx,
+ .start = rtw_ops_start,
+ .stop = rtw_ops_stop,
+ .config = rtw_ops_config,
+ .add_interface = rtw_ops_add_interface,
+ .remove_interface = rtw_ops_remove_interface,
+ .configure_filter = rtw_ops_configure_filter,
+ .bss_info_changed = rtw_ops_bss_info_changed,
+ .sta_add = rtw_ops_sta_add,
+ .sta_remove = rtw_ops_sta_remove,
+ .set_key = rtw_ops_set_key,
+ .ampdu_action = rtw_ops_ampdu_action,
+ .sw_scan_start = rtw_ops_sw_scan_start,
+ .sw_scan_complete = rtw_ops_sw_scan_complete,
+};
+EXPORT_SYMBOL(rtw_ops);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
new file mode 100644
index 000000000000..f447361f7573
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "regd.h"
+#include "fw.h"
+#include "ps.h"
+#include "sec.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "efuse.h"
+#include "debug.h"
+
+static bool rtw_fw_support_lps;
+unsigned int rtw_debug_mask;
+EXPORT_SYMBOL(rtw_debug_mask);
+
+module_param_named(support_lps, rtw_fw_support_lps, bool, 0644);
+module_param_named(debug_mask, rtw_debug_mask, uint, 0644);
+
+MODULE_PARM_DESC(support_lps, "Set Y to enable LPS support");
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+static struct ieee80211_channel rtw_channeltable_2g[] = {
+ {.center_freq = 2412, .hw_value = 1,},
+ {.center_freq = 2417, .hw_value = 2,},
+ {.center_freq = 2422, .hw_value = 3,},
+ {.center_freq = 2427, .hw_value = 4,},
+ {.center_freq = 2432, .hw_value = 5,},
+ {.center_freq = 2437, .hw_value = 6,},
+ {.center_freq = 2442, .hw_value = 7,},
+ {.center_freq = 2447, .hw_value = 8,},
+ {.center_freq = 2452, .hw_value = 9,},
+ {.center_freq = 2457, .hw_value = 10,},
+ {.center_freq = 2462, .hw_value = 11,},
+ {.center_freq = 2467, .hw_value = 12,},
+ {.center_freq = 2472, .hw_value = 13,},
+ {.center_freq = 2484, .hw_value = 14,},
+};
+
+static struct ieee80211_channel rtw_channeltable_5g[] = {
+ {.center_freq = 5180, .hw_value = 36,},
+ {.center_freq = 5200, .hw_value = 40,},
+ {.center_freq = 5220, .hw_value = 44,},
+ {.center_freq = 5240, .hw_value = 48,},
+ {.center_freq = 5260, .hw_value = 52,},
+ {.center_freq = 5280, .hw_value = 56,},
+ {.center_freq = 5300, .hw_value = 60,},
+ {.center_freq = 5320, .hw_value = 64,},
+ {.center_freq = 5500, .hw_value = 100,},
+ {.center_freq = 5520, .hw_value = 104,},
+ {.center_freq = 5540, .hw_value = 108,},
+ {.center_freq = 5560, .hw_value = 112,},
+ {.center_freq = 5580, .hw_value = 116,},
+ {.center_freq = 5600, .hw_value = 120,},
+ {.center_freq = 5620, .hw_value = 124,},
+ {.center_freq = 5640, .hw_value = 128,},
+ {.center_freq = 5660, .hw_value = 132,},
+ {.center_freq = 5680, .hw_value = 136,},
+ {.center_freq = 5700, .hw_value = 140,},
+ {.center_freq = 5745, .hw_value = 149,},
+ {.center_freq = 5765, .hw_value = 153,},
+ {.center_freq = 5785, .hw_value = 157,},
+ {.center_freq = 5805, .hw_value = 161,},
+ {.center_freq = 5825, .hw_value = 165,
+ .flags = IEEE80211_CHAN_NO_HT40MINUS},
+};
+
+static struct ieee80211_rate rtw_ratetable[] = {
+ {.bitrate = 10, .hw_value = 0x00,},
+ {.bitrate = 20, .hw_value = 0x01,},
+ {.bitrate = 55, .hw_value = 0x02,},
+ {.bitrate = 110, .hw_value = 0x03,},
+ {.bitrate = 60, .hw_value = 0x04,},
+ {.bitrate = 90, .hw_value = 0x05,},
+ {.bitrate = 120, .hw_value = 0x06,},
+ {.bitrate = 180, .hw_value = 0x07,},
+ {.bitrate = 240, .hw_value = 0x08,},
+ {.bitrate = 360, .hw_value = 0x09,},
+ {.bitrate = 480, .hw_value = 0x0a,},
+ {.bitrate = 540, .hw_value = 0x0b,},
+};
+
+static struct ieee80211_supported_band rtw_band_2ghz = {
+ .band = NL80211_BAND_2GHZ,
+
+ .channels = rtw_channeltable_2g,
+ .n_channels = ARRAY_SIZE(rtw_channeltable_2g),
+
+ .bitrates = rtw_ratetable,
+ .n_bitrates = ARRAY_SIZE(rtw_ratetable),
+
+ .ht_cap = {0},
+ .vht_cap = {0},
+};
+
+static struct ieee80211_supported_band rtw_band_5ghz = {
+ .band = NL80211_BAND_5GHZ,
+
+ .channels = rtw_channeltable_5g,
+ .n_channels = ARRAY_SIZE(rtw_channeltable_5g),
+
+ /* 5G has no CCK rates */
+ .bitrates = rtw_ratetable + 4,
+ .n_bitrates = ARRAY_SIZE(rtw_ratetable) - 4,
+
+ .ht_cap = {0},
+ .vht_cap = {0},
+};
+
+struct rtw_watch_dog_iter_data {
+ struct rtw_vif *rtwvif;
+ bool active;
+ u8 assoc_cnt;
+};
+
+static void rtw_vif_watch_dog_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_watch_dog_iter_data *iter_data = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->bss_conf.assoc) {
+ iter_data->assoc_cnt++;
+ iter_data->rtwvif = rtwvif;
+ }
+ if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD ||
+ rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
+ iter_data->active = true;
+ } else {
+ /* only STATION mode can enter lps */
+ iter_data->active = true;
+ }
+
+ rtwvif->stats.tx_unicast = 0;
+ rtwvif->stats.rx_unicast = 0;
+ rtwvif->stats.tx_cnt = 0;
+ rtwvif->stats.rx_cnt = 0;
+}
+
+/* Process TX/RX statistics periodically for the hardware;
+ * the information helps the hardware to enhance performance.
+ */
+static void rtw_watch_dog_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ watch_dog_work.work);
+ struct rtw_watch_dog_iter_data data = {};
+
+ if (!rtw_flag_check(rtwdev, RTW_FLAG_RUNNING))
+ return;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+
+ /* reset tx/rx statistics */
+ rtwdev->stats.tx_unicast = 0;
+ rtwdev->stats.rx_unicast = 0;
+ rtwdev->stats.tx_cnt = 0;
+ rtwdev->stats.rx_cnt = 0;
+
+ rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
+
+ /* fw supports entering lps only when a single station is associated;
+ * if two or more stations are associated, we can not enter lps,
+ * because fw does not handle the overlapped beacon intervals
+ */
+ if (rtw_fw_support_lps &&
+ data.rtwvif && !data.active && data.assoc_cnt == 1)
+ rtw_enter_lps(rtwdev, data.rtwvif);
+
+ if (rtw_flag_check(rtwdev, RTW_FLAG_SCANNING))
+ return;
+
+ rtw_phy_dynamic_mechanism(rtwdev);
+
+ rtwdev->watch_dog_cnt++;
+}
+
+static void rtw_c2h_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev, c2h_work);
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
+ skb_unlink(skb, &rtwdev->c2h_queue);
+ rtw_fw_c2h_cmd_handle(rtwdev, skb);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
+ struct rtw_channel_params *chan_params)
+{
+ struct ieee80211_channel *channel = chandef->chan;
+ enum nl80211_chan_width width = chandef->width;
+ u32 primary_freq, center_freq;
+ u8 center_chan;
+ u8 bandwidth = RTW_CHANNEL_WIDTH_20;
+ u8 primary_chan_idx = 0;
+
+ center_chan = channel->hw_value;
+ primary_freq = channel->center_freq;
+ center_freq = chandef->center_freq1;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bandwidth = RTW_CHANNEL_WIDTH_20;
+ primary_chan_idx = 0;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bandwidth = RTW_CHANNEL_WIDTH_40;
+ if (primary_freq > center_freq) {
+ primary_chan_idx = 1;
+ center_chan -= 2;
+ } else {
+ primary_chan_idx = 2;
+ center_chan += 2;
+ }
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bandwidth = RTW_CHANNEL_WIDTH_80;
+ if (primary_freq > center_freq) {
+ if (primary_freq - center_freq == 10) {
+ primary_chan_idx = 1;
+ center_chan -= 2;
+ } else {
+ primary_chan_idx = 3;
+ center_chan -= 6;
+ }
+ } else {
+ if (center_freq - primary_freq == 10) {
+ primary_chan_idx = 2;
+ center_chan += 2;
+ } else {
+ primary_chan_idx = 4;
+ center_chan += 6;
+ }
+ }
+ break;
+ default:
+ center_chan = 0;
+ break;
+ }
+
+ chan_params->center_chan = center_chan;
+ chan_params->bandwidth = bandwidth;
+ chan_params->primary_chan_idx = primary_chan_idx;
+}
+
+void rtw_set_channel(struct rtw_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_channel_params ch_param;
+ u8 center_chan, bandwidth, primary_chan_idx;
+
+ rtw_get_channel_params(&hw->conf.chandef, &ch_param);
+ if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
+ return;
+
+ center_chan = ch_param.center_chan;
+ bandwidth = ch_param.bandwidth;
+ primary_chan_idx = ch_param.primary_chan_idx;
+
+ hal->current_band_width = bandwidth;
+ hal->current_channel = center_chan;
+ hal->current_band_type = center_chan > 14 ? RTW_BAND_5G : RTW_BAND_2G;
+ chip->ops->set_channel(rtwdev, center_chan, bandwidth, primary_chan_idx);
+
+ rtw_phy_set_tx_power_level(rtwdev, center_chan);
+}
+
+static void rtw_vif_write_addr(struct rtw_dev *rtwdev, u32 start, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ rtw_write8(rtwdev, start + i, addr[i]);
+}
+
+void rtw_vif_port_config(struct rtw_dev *rtwdev,
+ struct rtw_vif *rtwvif,
+ u32 config)
+{
+ u32 addr, mask;
+
+ if (config & PORT_SET_MAC_ADDR) {
+ addr = rtwvif->conf->mac_addr.addr;
+ rtw_vif_write_addr(rtwdev, addr, rtwvif->mac_addr);
+ }
+ if (config & PORT_SET_BSSID) {
+ addr = rtwvif->conf->bssid.addr;
+ rtw_vif_write_addr(rtwdev, addr, rtwvif->bssid);
+ }
+ if (config & PORT_SET_NET_TYPE) {
+ addr = rtwvif->conf->net_type.addr;
+ mask = rtwvif->conf->net_type.mask;
+ rtw_write32_mask(rtwdev, addr, mask, rtwvif->net_type);
+ }
+ if (config & PORT_SET_AID) {
+ addr = rtwvif->conf->aid.addr;
+ mask = rtwvif->conf->aid.mask;
+ rtw_write32_mask(rtwdev, addr, mask, rtwvif->aid);
+ }
+}
+
+static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
+{
+ u8 bw = 0;
+
+ switch (bw_cap) {
+ case EFUSE_HW_CAP_IGNORE:
+ case EFUSE_HW_CAP_SUPP_BW80:
+ bw |= BIT(RTW_CHANNEL_WIDTH_80);
+ /* fall through */
+ case EFUSE_HW_CAP_SUPP_BW40:
+ bw |= BIT(RTW_CHANNEL_WIDTH_40);
+ /* fall through */
+ default:
+ bw |= BIT(RTW_CHANNEL_WIDTH_20);
+ break;
+ }
+
+ return bw;
+}
+
+static void rtw_hw_config_rf_ant_num(struct rtw_dev *rtwdev, u8 hw_ant_num)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ if (hw_ant_num == EFUSE_HW_CAP_IGNORE ||
+ hw_ant_num >= hal->rf_path_num)
+ return;
+
+ switch (hw_ant_num) {
+ case 1:
+ hal->rf_type = RF_1T1R;
+ hal->rf_path_num = 1;
+ hal->antenna_tx = BB_PATH_A;
+ hal->antenna_rx = BB_PATH_A;
+ break;
+ default:
+ WARN(1, "invalid hw configuration from efuse\n");
+ break;
+ }
+}
+
+static u64 get_vht_ra_mask(struct ieee80211_sta *sta)
+{
+ u64 ra_mask = 0;
+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+ u8 vht_mcs_cap;
+ int i, nss;
+
+ /* 4SS, every two bits for MCS7/8/9 */
+ for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 10) {
+ vht_mcs_cap = mcs_map & 0x3;
+ switch (vht_mcs_cap) {
+ case 2: /* MCS9 */
+ ra_mask |= 0x3ffULL << nss;
+ break;
+ case 1: /* MCS8 */
+ ra_mask |= 0x1ffULL << nss;
+ break;
+ case 0: /* MCS7 */
+ ra_mask |= 0x0ffULL << nss;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ra_mask;
+}
+
+static u8 get_rate_id(u8 wireless_set, enum rtw_bandwidth bw_mode, u8 tx_num)
+{
+ u8 rate_id = 0;
+
+ switch (wireless_set) {
+ case WIRELESS_CCK:
+ rate_id = RTW_RATEID_B_20M;
+ break;
+ case WIRELESS_OFDM:
+ rate_id = RTW_RATEID_G;
+ break;
+ case WIRELESS_CCK | WIRELESS_OFDM:
+ rate_id = RTW_RATEID_BG;
+ break;
+ case WIRELESS_OFDM | WIRELESS_HT:
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_GN_N1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_GN_N2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR5_N_3SS;
+ break;
+ case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_HT:
+ if (bw_mode == RTW_CHANNEL_WIDTH_40) {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_BGN_40M_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_BGN_40M_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR5_N_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR7_N_4SS;
+ } else {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_BGN_20M_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_BGN_20M_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR5_N_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR7_N_4SS;
+ }
+ break;
+ case WIRELESS_OFDM | WIRELESS_VHT:
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_ARFR1_AC_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_ARFR0_AC_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR4_AC_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR6_AC_4SS;
+ break;
+ case WIRELESS_CCK | WIRELESS_OFDM | WIRELESS_VHT:
+ if (bw_mode >= RTW_CHANNEL_WIDTH_80) {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_ARFR1_AC_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_ARFR0_AC_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR4_AC_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR6_AC_4SS;
+ } else {
+ if (tx_num == 1)
+ rate_id = RTW_RATEID_ARFR2_AC_2G_1SS;
+ else if (tx_num == 2)
+ rate_id = RTW_RATEID_ARFR3_AC_2G_2SS;
+ else if (tx_num == 3)
+ rate_id = RTW_RATEID_ARFR4_AC_3SS;
+ else if (tx_num == 4)
+ rate_id = RTW_RATEID_ARFR6_AC_4SS;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return rate_id;
+}
+
+#define RA_MASK_CCK_RATES 0x0000f
+#define RA_MASK_OFDM_RATES 0x00ff0
+#define RA_MASK_HT_RATES_1SS (0xff000ULL << 0)
+#define RA_MASK_HT_RATES_2SS (0xff000ULL << 8)
+#define RA_MASK_HT_RATES_3SS (0xff000ULL << 16)
+#define RA_MASK_HT_RATES (RA_MASK_HT_RATES_1SS | \
+ RA_MASK_HT_RATES_2SS | \
+ RA_MASK_HT_RATES_3SS)
+#define RA_MASK_VHT_RATES_1SS (0x3ff000ULL << 0)
+#define RA_MASK_VHT_RATES_2SS (0x3ff000ULL << 10)
+#define RA_MASK_VHT_RATES_3SS (0x3ff000ULL << 20)
+#define RA_MASK_VHT_RATES (RA_MASK_VHT_RATES_1SS | \
+ RA_MASK_VHT_RATES_2SS | \
+ RA_MASK_VHT_RATES_3SS)
+#define RA_MASK_CCK_IN_HT 0x00005
+#define RA_MASK_CCK_IN_VHT 0x00005
+#define RA_MASK_OFDM_IN_VHT 0x00010
+#define RA_MASK_OFDM_IN_HT_2G 0x00010
+#define RA_MASK_OFDM_IN_HT_5G 0x00030
+
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si)
+{
+ struct ieee80211_sta *sta = si->sta;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rssi_level;
+ u8 wireless_set;
+ u8 bw_mode;
+ u8 rate_id;
+ u8 rf_type = RF_1T1R;
+ u8 stbc_en = 0;
+ u8 ldpc_en = 0;
+ u8 tx_num = 1;
+ u64 ra_mask = 0;
+ bool is_vht_enable = false;
+ bool is_support_sgi = false;
+
+ if (sta->vht_cap.vht_supported) {
+ is_vht_enable = true;
+ ra_mask |= get_vht_ra_mask(sta);
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ stbc_en = VHT_STBC_EN;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ ldpc_en = VHT_LDPC_EN;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ is_support_sgi = true;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask |= (sta->ht_cap.mcs.rx_mask[NL80211_BAND_5GHZ] << 20) |
+ (sta->ht_cap.mcs.rx_mask[NL80211_BAND_2GHZ] << 12);
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ stbc_en = HT_STBC_EN;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ ldpc_en = HT_LDPC_EN;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 ||
+ sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ is_support_sgi = true;
+ }
+
+ if (hal->current_band_type == RTW_BAND_5G) {
+ ra_mask |= (u64)sta->supp_rates[NL80211_BAND_5GHZ] << 4;
+ if (sta->vht_cap.vht_supported) {
+ ra_mask &= RA_MASK_VHT_RATES | RA_MASK_OFDM_IN_VHT;
+ wireless_set = WIRELESS_OFDM | WIRELESS_VHT;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask &= RA_MASK_HT_RATES | RA_MASK_OFDM_IN_HT_5G;
+ wireless_set = WIRELESS_OFDM | WIRELESS_HT;
+ } else {
+ wireless_set = WIRELESS_OFDM;
+ }
+ } else if (hal->current_band_type == RTW_BAND_2G) {
+ ra_mask |= sta->supp_rates[NL80211_BAND_2GHZ];
+ if (sta->vht_cap.vht_supported) {
+ ra_mask &= RA_MASK_VHT_RATES | RA_MASK_CCK_IN_VHT |
+ RA_MASK_OFDM_IN_VHT;
+ wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
+ WIRELESS_HT | WIRELESS_VHT;
+ } else if (sta->ht_cap.ht_supported) {
+ ra_mask &= RA_MASK_HT_RATES | RA_MASK_CCK_IN_HT |
+ RA_MASK_OFDM_IN_HT_2G;
+ wireless_set = WIRELESS_CCK | WIRELESS_OFDM |
+ WIRELESS_HT;
+ } else if (sta->supp_rates[0] <= 0xf) {
+ wireless_set = WIRELESS_CCK;
+ } else {
+ wireless_set = WIRELESS_CCK | WIRELESS_OFDM;
+ }
+ } else {
+ rtw_err(rtwdev, "Unknown band type\n");
+ wireless_set = 0;
+ }
+
+ if (efuse->hw_cap.nss == 1) {
+ ra_mask &= RA_MASK_VHT_RATES_1SS;
+ ra_mask &= RA_MASK_HT_RATES_1SS;
+ }
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_80:
+ bw_mode = RTW_CHANNEL_WIDTH_80;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw_mode = RTW_CHANNEL_WIDTH_40;
+ break;
+ default:
+ bw_mode = RTW_CHANNEL_WIDTH_20;
+ break;
+ }
+
+ if (sta->vht_cap.vht_supported && ra_mask & 0xffc00000) {
+ tx_num = 2;
+ rf_type = RF_2T2R;
+ } else if (sta->ht_cap.ht_supported && ra_mask & 0xfff00000) {
+ tx_num = 2;
+ rf_type = RF_2T2R;
+ }
+
+ rate_id = get_rate_id(wireless_set, bw_mode, tx_num);
+
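+ /* larger rssi_level values strip more of the lowest rates from the mask */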
+ if (wireless_set != WIRELESS_CCK) {
+ rssi_level = si->rssi_level;
+ if (rssi_level == 0)
+ ra_mask &= 0xffffffffffffffffULL;
+ else if (rssi_level == 1)
+ ra_mask &= 0xfffffffffffffff0ULL;
+ else if (rssi_level == 2)
+ ra_mask &= 0xffffffffffffefe0ULL;
+ else if (rssi_level == 3)
+ ra_mask &= 0xffffffffffffcfc0ULL;
+ else if (rssi_level == 4)
+ ra_mask &= 0xffffffffffff8f80ULL;
+ else if (rssi_level >= 5)
+ ra_mask &= 0xffffffffffff0f00ULL;
+ }
+
+ si->bw_mode = bw_mode;
+ si->stbc_en = stbc_en;
+ si->ldpc_en = ldpc_en;
+ si->rf_type = rf_type;
+ si->wireless_set = wireless_set;
+ si->sgi_enable = is_support_sgi;
+ si->vht_enable = is_vht_enable;
+ si->ra_mask = ra_mask;
+ si->rate_id = rate_id;
+
+ rtw_fw_send_ra_info(rtwdev, si);
+}
+
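+/* Bring the device up: set up the HCI, power on the MAC, wait for the async
+ * firmware request to finish, download the firmware, then initialize the
+ * MAC, apply the PHY parameters and start the HCI.
+ */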
+static int rtw_power_on(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ int ret;
+
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ goto err;
+ }
+
+ /* power on MAC before downloading firmware */
+ ret = rtw_mac_power_on(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to power on mac\n");
+ goto err;
+ }
+
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware) {
+ ret = -EINVAL;
+ rtw_err(rtwdev, "failed to load firmware\n");
+ goto err;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download firmware\n");
+ goto err_off;
+ }
+
+ /* configure MAC after the firmware has been downloaded */
+ ret = rtw_mac_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to configure mac\n");
+ goto err_off;
+ }
+
+ chip->ops->phy_set_param(rtwdev);
+
+ ret = rtw_hci_start(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to start hci\n");
+ goto err_off;
+ }
+
+ return 0;
+
+err_off:
+ rtw_mac_power_off(rtwdev);
+
+err:
+ return ret;
+}
+
+int rtw_core_start(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_power_on(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw_sec_enable_sec_engine(rtwdev);
+
+ /* RCR was reset during power on, write back the configured value */
+ rtw_write32(rtwdev, REG_RCR, rtwdev->hal.rcr);
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->watch_dog_work,
+ RTW_WATCH_DOG_DELAY_TIME);
+
+ rtw_flag_set(rtwdev, RTW_FLAG_RUNNING);
+
+ return 0;
+}
+
+static void rtw_power_off(struct rtw_dev *rtwdev)
+{
+ rtwdev->hci.ops->stop(rtwdev);
+ rtw_mac_power_off(rtwdev);
+}
+
+void rtw_core_stop(struct rtw_dev *rtwdev)
+{
+ rtw_flag_clear(rtwdev, RTW_FLAG_RUNNING);
+ rtw_flag_clear(rtwdev, RTW_FLAG_FW_RUNNING);
+
+ cancel_delayed_work_sync(&rtwdev->watch_dog_work);
+
+ rtw_power_off(rtwdev);
+}
+
+static void rtw_init_ht_cap(struct rtw_dev *rtwdev,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+
+ ht_cap->ht_supported = true;
+ ht_cap->cap = 0;
+ ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ IEEE80211_HT_CAP_LDPC_CODING |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_40))
+ ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_SGI_40;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (efuse->hw_cap.nss > 1) {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = cpu_to_le16(300);
+ } else {
+ ht_cap->mcs.rx_mask[0] = 0xFF;
+ ht_cap->mcs.rx_mask[1] = 0x00;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = cpu_to_le16(150);
+ }
+}
+
+static void rtw_init_vht_cap(struct rtw_dev *rtwdev,
+ struct ieee80211_sta_vht_cap *vht_cap)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u16 mcs_map;
+ __le16 highest;
+
+ if (efuse->hw_cap.ptcl != EFUSE_HW_CAP_IGNORE &&
+ efuse->hw_cap.ptcl != EFUSE_HW_CAP_PTCL_VHT)
+ return;
+
+ vht_cap->vht_supported = true;
+ vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_RXLDPC |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_TXSTBC |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_HTC_VHT |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ 0;
+ mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+ if (efuse->hw_cap.nss > 1) {
+ highest = cpu_to_le16(780);
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << 2;
+ } else {
+ highest = cpu_to_le16(390);
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << 2;
+ }
+
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap->vht_mcs.rx_highest = highest;
+ vht_cap->vht_mcs.tx_highest = highest;
+}
+
+static void rtw_set_supported_band(struct ieee80211_hw *hw,
+ struct rtw_chip_info *chip)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+ struct ieee80211_supported_band *sband;
+
+ if (chip->band & RTW_BAND_2G) {
+ sband = kmemdup(&rtw_band_2ghz, sizeof(*sband), GFP_KERNEL);
+ if (!sband)
+ goto err_out;
+ if (chip->ht_supported)
+ rtw_init_ht_cap(rtwdev, &sband->ht_cap);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
+ }
+
+ if (chip->band & RTW_BAND_5G) {
+ sband = kmemdup(&rtw_band_5ghz, sizeof(*sband), GFP_KERNEL);
+ if (!sband)
+ goto err_out;
+ if (chip->ht_supported)
+ rtw_init_ht_cap(rtwdev, &sband->ht_cap);
+ if (chip->vht_supported)
+ rtw_init_vht_cap(rtwdev, &sband->vht_cap);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
+ }
+
+ return;
+
+err_out:
+ rtw_err(rtwdev, "failed to set supported band\n");
+ kfree(sband);
+}
+
+static void rtw_unset_supported_band(struct ieee80211_hw *hw,
+ struct rtw_chip_info *chip)
+{
+ kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+}
+
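+/* Completion callback of request_firmware_nowait(); the power-on and efuse
+ * setup paths block on fw->completion until the firmware has either been
+ * loaded or failed to load.
+ */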
+static void rtw_load_firmware_cb(const struct firmware *firmware, void *context)
+{
+ struct rtw_dev *rtwdev = context;
+ struct rtw_fw_state *fw = &rtwdev->fw;
+
+ if (!firmware)
+ rtw_err(rtwdev, "failed to request firmware\n");
+
+ fw->firmware = firmware;
+ complete_all(&fw->completion);
+}
+
+static int rtw_load_firmware(struct rtw_dev *rtwdev, const char *fw_name)
+{
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ int ret;
+
+ init_completion(&fw->completion);
+
+ ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
+ GFP_KERNEL, rtwdev, rtw_load_firmware_cb);
+ if (ret) {
+ rtw_err(rtwdev, "async firmware request failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
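+/* Read the chip/cut version and RF type from hardware registers and fill in
+ * the default efuse sizes; only the PCIe HCI is handled here so far.
+ */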
+static int rtw_chip_parameter_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u32 wl_bt_pwr_ctrl;
+ int ret = 0;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtwdev->hci.rpwm_addr = 0x03d9;
+ break;
+ default:
+ rtw_err(rtwdev, "unsupported hci type\n");
+ return -EINVAL;
+ }
+
+ wl_bt_pwr_ctrl = rtw_read32(rtwdev, REG_WL_BT_PWR_CTRL);
+ if (wl_bt_pwr_ctrl & BIT_BT_FUNC_EN)
+ rtwdev->efuse.btcoex = true;
+ hal->chip_version = rtw_read32(rtwdev, REG_SYS_CFG1);
+ hal->fab_version = BIT_GET_VENDOR_ID(hal->chip_version) >> 2;
+ hal->cut_version = BIT_GET_CHIP_VER(hal->chip_version);
+ hal->mp_chip = (hal->chip_version & BIT_RTL_ID) ? 0 : 1;
+ if (hal->chip_version & BIT_RF_TYPE_ID) {
+ hal->rf_type = RF_2T2R;
+ hal->rf_path_num = 2;
+ hal->antenna_tx = BB_PATH_AB;
+ hal->antenna_rx = BB_PATH_AB;
+ } else {
+ hal->rf_type = RF_1T1R;
+ hal->rf_path_num = 1;
+ hal->antenna_tx = BB_PATH_A;
+ hal->antenna_rx = BB_PATH_A;
+ }
+
+ if (hal->fab_version == 2)
+ hal->fab_version = 1;
+ else if (hal->fab_version == 1)
+ hal->fab_version = 2;
+
+ efuse->physical_size = chip->phy_efuse_size;
+ efuse->logical_size = chip->log_efuse_size;
+ efuse->protect_size = chip->ptct_efuse_size;
+
+ /* default use ack */
+ rtwdev->hal.rcr |= BIT_VHT_DACK;
+
+ return ret;
+}
+
+static int rtw_chip_efuse_enable(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ int ret;
+
+ ret = rtw_hci_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup hci\n");
+ goto err;
+ }
+
+ ret = rtw_mac_power_on(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to power on mac\n");
+ goto err;
+ }
+
+ rtw_write8(rtwdev, REG_C2HEVT, C2H_HW_FEATURE_DUMP);
+
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware) {
+ ret = -EINVAL;
+ rtw_err(rtwdev, "failed to load firmware\n");
+ goto err;
+ }
+
+ ret = rtw_download_firmware(rtwdev, fw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to download firmware\n");
+ goto err_off;
+ }
+
+ return 0;
+
+err_off:
+ rtw_mac_power_off(rtwdev);
+
+err:
+ return ret;
+}
+
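+/* Read back the hardware feature report requested via C2H_HW_FEATURE_DUMP
+ * (bandwidth, protocol, NSS and antenna number) and store it in the efuse
+ * hw_cap fields.
+ */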
+static int rtw_dump_hw_feature(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 hw_feature[HW_FEATURE_LEN];
+ u8 id;
+ u8 bw;
+ int i;
+
+ id = rtw_read8(rtwdev, REG_C2HEVT);
+ if (id != C2H_HW_FEATURE_REPORT) {
+ rtw_err(rtwdev, "failed to read hw feature report\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < HW_FEATURE_LEN; i++)
+ hw_feature[i] = rtw_read8(rtwdev, REG_C2HEVT + 2 + i);
+
+ rtw_write8(rtwdev, REG_C2HEVT, 0);
+
+ bw = GET_EFUSE_HW_CAP_BW(hw_feature);
+ efuse->hw_cap.bw = hw_bw_cap_to_bitamp(bw);
+ efuse->hw_cap.hci = GET_EFUSE_HW_CAP_HCI(hw_feature);
+ efuse->hw_cap.nss = GET_EFUSE_HW_CAP_NSS(hw_feature);
+ efuse->hw_cap.ptcl = GET_EFUSE_HW_CAP_PTCL(hw_feature);
+ efuse->hw_cap.ant_num = GET_EFUSE_HW_CAP_ANT_NUM(hw_feature);
+
+ rtw_hw_config_rf_ant_num(rtwdev, efuse->hw_cap.ant_num);
+
+ if (efuse->hw_cap.nss == EFUSE_HW_CAP_IGNORE)
+ efuse->hw_cap.nss = rtwdev->hal.rf_path_num;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "hw cap: hci=0x%02x, bw=0x%02x, ptcl=0x%02x, ant_num=%d, nss=%d\n",
+ efuse->hw_cap.hci, efuse->hw_cap.bw, efuse->hw_cap.ptcl,
+ efuse->hw_cap.ant_num, efuse->hw_cap.nss);
+
+ return 0;
+}
+
+static void rtw_chip_efuse_disable(struct rtw_dev *rtwdev)
+{
+ rtw_hci_stop(rtwdev);
+ rtw_mac_power_off(rtwdev);
+}
+
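+/* Power on the MAC just long enough to read and parse the efuse map, replace
+ * unprogrammed (0xff) fields with sane defaults, then power it off again.
+ */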
+static int rtw_chip_efuse_info_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+
+ /* power on mac to read efuse */
+ ret = rtw_chip_efuse_enable(rtwdev);
+ if (ret)
+ goto out;
+
+ ret = rtw_parse_efuse_map(rtwdev);
+ if (ret)
+ goto out;
+
+ ret = rtw_dump_hw_feature(rtwdev);
+ if (ret)
+ goto out;
+
+ ret = rtw_check_supported_rfe(rtwdev);
+ if (ret)
+ goto out;
+
+ if (efuse->crystal_cap == 0xff)
+ efuse->crystal_cap = 0;
+ if (efuse->pa_type_2g == 0xff)
+ efuse->pa_type_2g = 0;
+ if (efuse->pa_type_5g == 0xff)
+ efuse->pa_type_5g = 0;
+ if (efuse->lna_type_2g == 0xff)
+ efuse->lna_type_2g = 0;
+ if (efuse->lna_type_5g == 0xff)
+ efuse->lna_type_5g = 0;
+ if (efuse->channel_plan == 0xff)
+ efuse->channel_plan = 0x7f;
+ if (efuse->bt_setting & BIT(0))
+ efuse->share_ant = true;
+ if (efuse->regd == 0xff)
+ efuse->regd = 0;
+
+ efuse->ext_pa_2g = efuse->pa_type_2g & BIT(4) ? 1 : 0;
+ efuse->ext_lna_2g = efuse->lna_type_2g & BIT(3) ? 1 : 0;
+ efuse->ext_pa_5g = efuse->pa_type_5g & BIT(0) ? 1 : 0;
+ efuse->ext_lna_5g = efuse->lna_type_5g & BIT(3) ? 1 : 0;
+
+ rtw_chip_efuse_disable(rtwdev);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
+static int rtw_chip_board_info_setup(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
+
+ if (!rfe_def)
+ return -ENODEV;
+
+ rtw_phy_setup_phy_cond(rtwdev, 0);
+
+ rtw_hw_init_tx_power(hal);
+ rtw_load_table(rtwdev, rfe_def->phy_pg_tbl);
+ rtw_load_table(rtwdev, rfe_def->txpwr_lmt_tbl);
+ rtw_phy_tx_power_by_rate_config(hal);
+ rtw_phy_tx_power_limit_config(hal);
+
+ return 0;
+}
+
+int rtw_chip_info_setup(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_chip_parameter_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip parameters\n");
+ goto err_out;
+ }
+
+ ret = rtw_chip_efuse_info_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip efuse info\n");
+ goto err_out;
+ }
+
+ ret = rtw_chip_board_info_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip board info\n");
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ return ret;
+}
+EXPORT_SYMBOL(rtw_chip_info_setup);
+
+int rtw_core_init(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ INIT_LIST_HEAD(&rtwdev->rsvd_page_list);
+
+ timer_setup(&rtwdev->tx_report.purge_timer,
+ rtw_tx_report_purge_timer, 0);
+
+ INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
+ INIT_DELAYED_WORK(&rtwdev->lps_work, rtw_lps_work);
+ INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ skb_queue_head_init(&rtwdev->c2h_queue);
+ skb_queue_head_init(&rtwdev->tx_report.queue);
+
+ spin_lock_init(&rtwdev->dm_lock);
+ spin_lock_init(&rtwdev->rf_lock);
+ spin_lock_init(&rtwdev->h2c.lock);
+ spin_lock_init(&rtwdev->tx_report.q_lock);
+
+ mutex_init(&rtwdev->mutex);
+ mutex_init(&rtwdev->hal.tx_power_mutex);
+
+ rtwdev->sec.total_cam_num = 32;
+ rtwdev->hal.current_channel = 1;
+ set_bit(RTW_BC_MC_MACID, rtwdev->mac_id_map);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw_add_rsvd_page(rtwdev, RSVD_BEACON, false);
+ mutex_unlock(&rtwdev->mutex);
+
+ /* default rx filter setting */
+ rtwdev->hal.rcr = BIT_APP_FCS | BIT_APP_MIC | BIT_APP_ICV |
+ BIT_HTC_LOC_CTRL | BIT_APP_PHYSTS |
+ BIT_AB | BIT_AM | BIT_APM;
+
+ ret = rtw_load_firmware(rtwdev, rtwdev->chip->fw_name);
+ if (ret) {
+ rtw_warn(rtwdev, "no firmware loaded\n");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw_core_init);
+
+void rtw_core_deinit(struct rtw_dev *rtwdev)
+{
+ struct rtw_fw_state *fw = &rtwdev->fw;
+ struct rtw_rsvd_page *rsvd_pkt, *tmp;
+ unsigned long flags;
+
+ if (fw->firmware)
+ release_firmware(fw->firmware);
+
+ spin_lock_irqsave(&rtwdev->tx_report.q_lock, flags);
+ skb_queue_purge(&rtwdev->tx_report.queue);
+ spin_unlock_irqrestore(&rtwdev->tx_report.q_lock, flags);
+
+ list_for_each_entry_safe(rsvd_pkt, tmp, &rtwdev->rsvd_page_list, list) {
+ list_del(&rsvd_pkt->list);
+ kfree(rsvd_pkt);
+ }
+
+ mutex_destroy(&rtwdev->mutex);
+ mutex_destroy(&rtwdev->hal.tx_power_mutex);
+}
+EXPORT_SYMBOL(rtw_core_deinit);
+
+int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
+{
+ int max_tx_headroom = 0;
+ int ret;
+
+ /* TODO: USB & SDIO may need extra room? */
+ max_tx_headroom = rtwdev->chip->tx_pkt_desc_sz;
+
+ hw->extra_tx_headroom = max_tx_headroom;
+ hw->queues = IEEE80211_NUM_ACS;
+ hw->sta_data_size = sizeof(struct rtw_sta_info);
+ hw->vif_data_size = sizeof(struct rtw_vif);
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
+ rtw_set_supported_band(hw, rtwdev->chip);
+ SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
+
+ rtw_regd_init(rtwdev, rtw_regd_notifier);
+
+ ret = ieee80211_register_hw(hw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to register hw\n");
+ return ret;
+ }
+
+ if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2))
+ rtw_err(rtwdev, "regulatory_hint fail\n");
+
+ rtw_debugfs_init(rtwdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw_register_hw);
+
+void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ ieee80211_unregister_hw(hw);
+ rtw_unset_supported_band(hw, chip);
+}
+EXPORT_SYMBOL(rtw_unregister_hw);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless core module");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
new file mode 100644
index 000000000000..00fc77fb9b54
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -0,0 +1,1104 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTK_MAIN_H_
+#define __RTK_MAIN_H_
+
+#include <net/mac80211.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/average.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "util.h"
+
+#define RTW_MAX_MAC_ID_NUM 32
+#define RTW_MAX_SEC_CAM_NUM 32
+
+#define RTW_WATCH_DOG_DELAY_TIME round_jiffies_relative(HZ * 2)
+
+#define RFREG_MASK 0xfffff
+#define INV_RF_DATA 0xffffffff
+#define TX_PAGE_SIZE_SHIFT 7
+
+#define RTW_CHANNEL_WIDTH_MAX 3
+#define RTW_RF_PATH_MAX 4
+#define HW_FEATURE_LEN 13
+
+extern unsigned int rtw_debug_mask;
+extern const struct ieee80211_ops rtw_ops;
+extern struct rtw_chip_info rtw8822b_hw_spec;
+extern struct rtw_chip_info rtw8822c_hw_spec;
+
+#define RTW_MAX_CHANNEL_NUM_2G 14
+#define RTW_MAX_CHANNEL_NUM_5G 49
+
+struct rtw_dev;
+
+enum rtw_hci_type {
+ RTW_HCI_TYPE_PCIE,
+ RTW_HCI_TYPE_USB,
+ RTW_HCI_TYPE_SDIO,
+
+ RTW_HCI_TYPE_UNDEFINE,
+};
+
+struct rtw_hci {
+ struct rtw_hci_ops *ops;
+ enum rtw_hci_type type;
+
+ u32 rpwm_addr;
+
+ u8 bulkout_num;
+};
+
+enum rtw_supported_band {
+ RTW_BAND_2G = 1 << 0,
+ RTW_BAND_5G = 1 << 1,
+ RTW_BAND_60G = 1 << 2,
+
+ RTW_BAND_MAX,
+};
+
+enum rtw_bandwidth {
+ RTW_CHANNEL_WIDTH_20 = 0,
+ RTW_CHANNEL_WIDTH_40 = 1,
+ RTW_CHANNEL_WIDTH_80 = 2,
+ RTW_CHANNEL_WIDTH_160 = 3,
+ RTW_CHANNEL_WIDTH_80_80 = 4,
+ RTW_CHANNEL_WIDTH_5 = 5,
+ RTW_CHANNEL_WIDTH_10 = 6,
+};
+
+enum rtw_net_type {
+ RTW_NET_NO_LINK = 0,
+ RTW_NET_AD_HOC = 1,
+ RTW_NET_MGD_LINKED = 2,
+ RTW_NET_AP_MODE = 3,
+};
+
+enum rtw_rf_type {
+ RF_1T1R = 0,
+ RF_1T2R = 1,
+ RF_2T2R = 2,
+ RF_2T3R = 3,
+ RF_2T4R = 4,
+ RF_3T3R = 5,
+ RF_3T4R = 6,
+ RF_4T4R = 7,
+ RF_TYPE_MAX,
+};
+
+enum rtw_rf_path {
+ RF_PATH_A = 0,
+ RF_PATH_B = 1,
+ RF_PATH_C = 2,
+ RF_PATH_D = 3,
+};
+
+enum rtw_bb_path {
+ BB_PATH_A = BIT(0),
+ BB_PATH_B = BIT(1),
+ BB_PATH_C = BIT(2),
+ BB_PATH_D = BIT(3),
+
+ BB_PATH_AB = (BB_PATH_A | BB_PATH_B),
+ BB_PATH_AC = (BB_PATH_A | BB_PATH_C),
+ BB_PATH_AD = (BB_PATH_A | BB_PATH_D),
+ BB_PATH_BC = (BB_PATH_B | BB_PATH_C),
+ BB_PATH_BD = (BB_PATH_B | BB_PATH_D),
+ BB_PATH_CD = (BB_PATH_C | BB_PATH_D),
+
+ BB_PATH_ABC = (BB_PATH_A | BB_PATH_B | BB_PATH_C),
+ BB_PATH_ABD = (BB_PATH_A | BB_PATH_B | BB_PATH_D),
+ BB_PATH_ACD = (BB_PATH_A | BB_PATH_C | BB_PATH_D),
+ BB_PATH_BCD = (BB_PATH_B | BB_PATH_C | BB_PATH_D),
+
+ BB_PATH_ABCD = (BB_PATH_A | BB_PATH_B | BB_PATH_C | BB_PATH_D),
+};
+
+enum rtw_rate_section {
+ RTW_RATE_SECTION_CCK = 0,
+ RTW_RATE_SECTION_OFDM,
+ RTW_RATE_SECTION_HT_1S,
+ RTW_RATE_SECTION_HT_2S,
+ RTW_RATE_SECTION_VHT_1S,
+ RTW_RATE_SECTION_VHT_2S,
+
+ /* keep last */
+ RTW_RATE_SECTION_MAX,
+};
+
+enum rtw_wireless_set {
+ WIRELESS_CCK = 0x00000001,
+ WIRELESS_OFDM = 0x00000002,
+ WIRELESS_HT = 0x00000004,
+ WIRELESS_VHT = 0x00000008,
+};
+
+#define HT_STBC_EN BIT(0)
+#define VHT_STBC_EN BIT(1)
+#define HT_LDPC_EN BIT(0)
+#define VHT_LDPC_EN BIT(1)
+
+enum rtw_chip_type {
+ RTW_CHIP_TYPE_8822B,
+ RTW_CHIP_TYPE_8822C,
+};
+
+enum rtw_tx_queue_type {
+ /* the order of AC queues matters */
+ RTW_TX_QUEUE_BK = 0x0,
+ RTW_TX_QUEUE_BE = 0x1,
+ RTW_TX_QUEUE_VI = 0x2,
+ RTW_TX_QUEUE_VO = 0x3,
+
+ RTW_TX_QUEUE_BCN = 0x4,
+ RTW_TX_QUEUE_MGMT = 0x5,
+ RTW_TX_QUEUE_HI0 = 0x6,
+ RTW_TX_QUEUE_H2C = 0x7,
+ /* keep it last */
+ RTK_MAX_TX_QUEUE_NUM
+};
+
+enum rtw_rx_queue_type {
+ RTW_RX_QUEUE_MPDU = 0x0,
+ RTW_RX_QUEUE_C2H = 0x1,
+ /* keep it last */
+ RTK_MAX_RX_QUEUE_NUM
+};
+
+enum rtw_rate_index {
+ RTW_RATEID_BGN_40M_2SS = 0,
+ RTW_RATEID_BGN_40M_1SS = 1,
+ RTW_RATEID_BGN_20M_2SS = 2,
+ RTW_RATEID_BGN_20M_1SS = 3,
+ RTW_RATEID_GN_N2SS = 4,
+ RTW_RATEID_GN_N1SS = 5,
+ RTW_RATEID_BG = 6,
+ RTW_RATEID_G = 7,
+ RTW_RATEID_B_20M = 8,
+ RTW_RATEID_ARFR0_AC_2SS = 9,
+ RTW_RATEID_ARFR1_AC_1SS = 10,
+ RTW_RATEID_ARFR2_AC_2G_1SS = 11,
+ RTW_RATEID_ARFR3_AC_2G_2SS = 12,
+ RTW_RATEID_ARFR4_AC_3SS = 13,
+ RTW_RATEID_ARFR5_N_3SS = 14,
+ RTW_RATEID_ARFR7_N_4SS = 15,
+ RTW_RATEID_ARFR6_AC_4SS = 16
+};
+
+enum rtw_trx_desc_rate {
+ DESC_RATE1M = 0x00,
+ DESC_RATE2M = 0x01,
+ DESC_RATE5_5M = 0x02,
+ DESC_RATE11M = 0x03,
+
+ DESC_RATE6M = 0x04,
+ DESC_RATE9M = 0x05,
+ DESC_RATE12M = 0x06,
+ DESC_RATE18M = 0x07,
+ DESC_RATE24M = 0x08,
+ DESC_RATE36M = 0x09,
+ DESC_RATE48M = 0x0a,
+ DESC_RATE54M = 0x0b,
+
+ DESC_RATEMCS0 = 0x0c,
+ DESC_RATEMCS1 = 0x0d,
+ DESC_RATEMCS2 = 0x0e,
+ DESC_RATEMCS3 = 0x0f,
+ DESC_RATEMCS4 = 0x10,
+ DESC_RATEMCS5 = 0x11,
+ DESC_RATEMCS6 = 0x12,
+ DESC_RATEMCS7 = 0x13,
+ DESC_RATEMCS8 = 0x14,
+ DESC_RATEMCS9 = 0x15,
+ DESC_RATEMCS10 = 0x16,
+ DESC_RATEMCS11 = 0x17,
+ DESC_RATEMCS12 = 0x18,
+ DESC_RATEMCS13 = 0x19,
+ DESC_RATEMCS14 = 0x1a,
+ DESC_RATEMCS15 = 0x1b,
+ DESC_RATEMCS16 = 0x1c,
+ DESC_RATEMCS17 = 0x1d,
+ DESC_RATEMCS18 = 0x1e,
+ DESC_RATEMCS19 = 0x1f,
+ DESC_RATEMCS20 = 0x20,
+ DESC_RATEMCS21 = 0x21,
+ DESC_RATEMCS22 = 0x22,
+ DESC_RATEMCS23 = 0x23,
+ DESC_RATEMCS24 = 0x24,
+ DESC_RATEMCS25 = 0x25,
+ DESC_RATEMCS26 = 0x26,
+ DESC_RATEMCS27 = 0x27,
+ DESC_RATEMCS28 = 0x28,
+ DESC_RATEMCS29 = 0x29,
+ DESC_RATEMCS30 = 0x2a,
+ DESC_RATEMCS31 = 0x2b,
+
+ DESC_RATEVHT1SS_MCS0 = 0x2c,
+ DESC_RATEVHT1SS_MCS1 = 0x2d,
+ DESC_RATEVHT1SS_MCS2 = 0x2e,
+ DESC_RATEVHT1SS_MCS3 = 0x2f,
+ DESC_RATEVHT1SS_MCS4 = 0x30,
+ DESC_RATEVHT1SS_MCS5 = 0x31,
+ DESC_RATEVHT1SS_MCS6 = 0x32,
+ DESC_RATEVHT1SS_MCS7 = 0x33,
+ DESC_RATEVHT1SS_MCS8 = 0x34,
+ DESC_RATEVHT1SS_MCS9 = 0x35,
+
+ DESC_RATEVHT2SS_MCS0 = 0x36,
+ DESC_RATEVHT2SS_MCS1 = 0x37,
+ DESC_RATEVHT2SS_MCS2 = 0x38,
+ DESC_RATEVHT2SS_MCS3 = 0x39,
+ DESC_RATEVHT2SS_MCS4 = 0x3a,
+ DESC_RATEVHT2SS_MCS5 = 0x3b,
+ DESC_RATEVHT2SS_MCS6 = 0x3c,
+ DESC_RATEVHT2SS_MCS7 = 0x3d,
+ DESC_RATEVHT2SS_MCS8 = 0x3e,
+ DESC_RATEVHT2SS_MCS9 = 0x3f,
+
+ DESC_RATEVHT3SS_MCS0 = 0x40,
+ DESC_RATEVHT3SS_MCS1 = 0x41,
+ DESC_RATEVHT3SS_MCS2 = 0x42,
+ DESC_RATEVHT3SS_MCS3 = 0x43,
+ DESC_RATEVHT3SS_MCS4 = 0x44,
+ DESC_RATEVHT3SS_MCS5 = 0x45,
+ DESC_RATEVHT3SS_MCS6 = 0x46,
+ DESC_RATEVHT3SS_MCS7 = 0x47,
+ DESC_RATEVHT3SS_MCS8 = 0x48,
+ DESC_RATEVHT3SS_MCS9 = 0x49,
+
+ DESC_RATEVHT4SS_MCS0 = 0x4a,
+ DESC_RATEVHT4SS_MCS1 = 0x4b,
+ DESC_RATEVHT4SS_MCS2 = 0x4c,
+ DESC_RATEVHT4SS_MCS3 = 0x4d,
+ DESC_RATEVHT4SS_MCS4 = 0x4e,
+ DESC_RATEVHT4SS_MCS5 = 0x4f,
+ DESC_RATEVHT4SS_MCS6 = 0x50,
+ DESC_RATEVHT4SS_MCS7 = 0x51,
+ DESC_RATEVHT4SS_MCS8 = 0x52,
+ DESC_RATEVHT4SS_MCS9 = 0x53,
+
+ DESC_RATE_MAX,
+};
+
+enum rtw_regulatory_domains {
+ RTW_REGD_FCC = 0,
+ RTW_REGD_MKK = 1,
+ RTW_REGD_ETSI = 2,
+ RTW_REGD_WW = 3,
+
+ RTW_REGD_MAX
+};
+
+enum rtw_flags {
+ RTW_FLAG_RUNNING,
+ RTW_FLAG_FW_RUNNING,
+ RTW_FLAG_SCANNING,
+ RTW_FLAG_INACTIVE_PS,
+ RTW_FLAG_LEISURE_PS,
+ RTW_FLAG_DIG_DISABLE,
+
+ NUM_OF_RTW_FLAGS,
+};
+
+/* the power index is stored as differences: cck-1s and ht40-1s are the base
+ * values, so for the 1s differences only ht20 and ofdm are needed
+ */
+struct rtw_2g_1s_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 ofdm:4;
+ s8 bw20:4;
+#else
+ s8 bw20:4;
+ s8 ofdm:4;
+#endif
+} __packed;
+
+struct rtw_2g_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 bw20:4;
+ s8 bw40:4;
+ s8 cck:4;
+ s8 ofdm:4;
+#else
+ s8 ofdm:4;
+ s8 cck:4;
+ s8 bw40:4;
+ s8 bw20:4;
+#endif
+} __packed;
+
+struct rtw_2g_txpwr_idx {
+ u8 cck_base[6];
+ u8 bw40_base[5];
+ struct rtw_2g_1s_pwr_idx_diff ht_1s_diff;
+ struct rtw_2g_ns_pwr_idx_diff ht_2s_diff;
+ struct rtw_2g_ns_pwr_idx_diff ht_3s_diff;
+ struct rtw_2g_ns_pwr_idx_diff ht_4s_diff;
+};
+
+struct rtw_5g_ht_1s_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 ofdm:4;
+ s8 bw20:4;
+#else
+ s8 bw20:4;
+ s8 ofdm:4;
+#endif
+} __packed;
+
+struct rtw_5g_ht_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 bw20:4;
+ s8 bw40:4;
+#else
+ s8 bw40:4;
+ s8 bw20:4;
+#endif
+} __packed;
+
+struct rtw_5g_ofdm_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 ofdm_3s:4;
+ s8 ofdm_2s:4;
+ s8 ofdm_4s:4;
+ s8 res:4;
+#else
+ s8 res:4;
+ s8 ofdm_4s:4;
+ s8 ofdm_2s:4;
+ s8 ofdm_3s:4;
+#endif
+} __packed;
+
+struct rtw_5g_vht_ns_pwr_idx_diff {
+#ifdef __LITTLE_ENDIAN
+ s8 bw160:4;
+ s8 bw80:4;
+#else
+ s8 bw80:4;
+ s8 bw160:4;
+#endif
+} __packed;
+
+struct rtw_5g_txpwr_idx {
+ u8 bw40_base[14];
+ struct rtw_5g_ht_1s_pwr_idx_diff ht_1s_diff;
+ struct rtw_5g_ht_ns_pwr_idx_diff ht_2s_diff;
+ struct rtw_5g_ht_ns_pwr_idx_diff ht_3s_diff;
+ struct rtw_5g_ht_ns_pwr_idx_diff ht_4s_diff;
+ struct rtw_5g_ofdm_ns_pwr_idx_diff ofdm_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_1s_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_2s_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_3s_diff;
+ struct rtw_5g_vht_ns_pwr_idx_diff vht_4s_diff;
+};
+
+struct rtw_txpwr_idx {
+ struct rtw_2g_txpwr_idx pwr_idx_2g;
+ struct rtw_5g_txpwr_idx pwr_idx_5g;
+};
+
+struct rtw_timer_list {
+ struct timer_list timer;
+ void (*function)(void *data);
+ void *args;
+};
+
+struct rtw_channel_params {
+ u8 center_chan;
+ u8 bandwidth;
+ u8 primary_chan_idx;
+};
+
+struct rtw_hw_reg {
+ u32 addr;
+ u32 mask;
+};
+
+struct rtw_backup_info {
+ u8 len;
+ u32 reg;
+ u32 val;
+};
+
+enum rtw_vif_port_set {
+ PORT_SET_MAC_ADDR = BIT(0),
+ PORT_SET_BSSID = BIT(1),
+ PORT_SET_NET_TYPE = BIT(2),
+ PORT_SET_AID = BIT(3),
+};
+
+struct rtw_vif_port {
+ struct rtw_hw_reg mac_addr;
+ struct rtw_hw_reg bssid;
+ struct rtw_hw_reg net_type;
+ struct rtw_hw_reg aid;
+};
+
+struct rtw_tx_pkt_info {
+ u32 tx_pkt_size;
+ u8 offset;
+ u8 pkt_offset;
+ u8 mac_id;
+ u8 rate_id;
+ u8 rate;
+ u8 qsel;
+ u8 bw;
+ u8 sec_type;
+ u8 sn;
+ bool ampdu_en;
+ u8 ampdu_factor;
+ u8 ampdu_density;
+ u16 seq;
+ bool stbc;
+ bool ldpc;
+ bool dis_rate_fallback;
+ bool bmc;
+ bool use_rate;
+ bool ls;
+ bool fs;
+ bool short_gi;
+ bool report;
+};
+
+struct rtw_rx_pkt_stat {
+ bool phy_status;
+ bool icv_err;
+ bool crc_err;
+ bool decrypted;
+ bool is_c2h;
+
+ s32 signal_power;
+ u16 pkt_len;
+ u8 bw;
+ u8 drv_info_sz;
+ u8 shift;
+ u8 rate;
+ u8 mac_id;
+ u8 cam_id;
+ u8 ppdu_cnt;
+ u32 tsf_low;
+ s8 rx_power[RTW_RF_PATH_MAX];
+ u8 rssi;
+ u8 rxsc;
+ struct rtw_sta_info *si;
+ struct ieee80211_vif *vif;
+};
+
+struct rtw_traffic_stats {
+ /* units in bytes */
+ u64 tx_unicast;
+ u64 rx_unicast;
+
+ /* packet counters */
+ u64 tx_cnt;
+ u64 rx_cnt;
+
+ /* units in Mbps */
+ u32 tx_throughput;
+ u32 rx_throughput;
+};
+
+enum rtw_lps_mode {
+ RTW_MODE_ACTIVE = 0,
+ RTW_MODE_LPS = 1,
+ RTW_MODE_WMM_PS = 2,
+};
+
+enum rtw_pwr_state {
+ RTW_RF_OFF = 0x0,
+ RTW_RF_ON = 0x4,
+ RTW_ALL_ON = 0xc,
+};
+
+struct rtw_lps_conf {
+ /* the interface to enter lps */
+ struct rtw_vif *rtwvif;
+ enum rtw_lps_mode mode;
+ enum rtw_pwr_state state;
+ u8 awake_interval;
+ u8 rlbm;
+ u8 smart_ps;
+ u8 port_id;
+};
+
+enum rtw_hw_key_type {
+ RTW_CAM_NONE = 0,
+ RTW_CAM_WEP40 = 1,
+ RTW_CAM_TKIP = 2,
+ RTW_CAM_AES = 4,
+ RTW_CAM_WEP104 = 5,
+};
+
+struct rtw_cam_entry {
+ bool valid;
+ bool group;
+ u8 addr[ETH_ALEN];
+ u8 hw_key_type;
+ struct ieee80211_key_conf *key;
+};
+
+struct rtw_sec_desc {
+ /* search strategy */
+ bool default_key_search;
+
+ u32 total_cam_num;
+ struct rtw_cam_entry cam_table[RTW_MAX_SEC_CAM_NUM];
+ DECLARE_BITMAP(cam_map, RTW_MAX_SEC_CAM_NUM);
+};
+
+struct rtw_tx_report {
+ /* protect the tx report queue */
+ spinlock_t q_lock;
+ struct sk_buff_head queue;
+ atomic_t sn;
+ struct timer_list purge_timer;
+};
+
+#define RTW_BC_MC_MACID 1
+DECLARE_EWMA(rssi, 10, 16);
+
+struct rtw_sta_info {
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+
+ struct ewma_rssi avg_rssi;
+ u8 rssi_level;
+
+ u8 mac_id;
+ u8 rate_id;
+ enum rtw_bandwidth bw_mode;
+ enum rtw_rf_type rf_type;
+ enum rtw_wireless_set wireless_set;
+ u8 stbc_en:2;
+ u8 ldpc_en:2;
+ bool sgi_enable;
+ bool vht_enable;
+ bool updated;
+ u8 init_ra_lv;
+ u64 ra_mask;
+};
+
+struct rtw_vif {
+ struct ieee80211_vif *vif;
+ enum rtw_net_type net_type;
+ u16 aid;
+ u8 mac_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 port;
+ const struct rtw_vif_port *conf;
+
+ struct rtw_traffic_stats stats;
+ bool in_lps;
+};
+
+struct rtw_regulatory {
+ char alpha2[2];
+ u8 chplan;
+ u8 txpwr_regd;
+};
+
+struct rtw_chip_ops {
+ int (*mac_init)(struct rtw_dev *rtwdev);
+ int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map);
+ void (*phy_set_param)(struct rtw_dev *rtwdev);
+ void (*set_channel)(struct rtw_dev *rtwdev, u8 channel,
+ u8 bandwidth, u8 primary_chan_idx);
+ void (*query_rx_desc)(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status);
+ u32 (*read_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask);
+ bool (*write_rf)(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+ void (*set_tx_power_index)(struct rtw_dev *rtwdev);
+ int (*rsvd_page_dump)(struct rtw_dev *rtwdev, u8 *buf, u32 offset,
+ u32 size);
+ void (*set_antenna)(struct rtw_dev *rtwdev, u8 antenna_tx,
+ u8 antenna_rx);
+ void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
+ void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
+ void (*do_iqk)(struct rtw_dev *rtwdev);
+};
+
+#define RTW_PWR_POLLING_CNT 20000
+
+#define RTW_PWR_CMD_READ 0x00
+#define RTW_PWR_CMD_WRITE 0x01
+#define RTW_PWR_CMD_POLLING 0x02
+#define RTW_PWR_CMD_DELAY 0x03
+#define RTW_PWR_CMD_END 0x04
+
+/* define the base address of each block */
+#define RTW_PWR_ADDR_MAC 0x00
+#define RTW_PWR_ADDR_USB 0x01
+#define RTW_PWR_ADDR_PCIE 0x02
+#define RTW_PWR_ADDR_SDIO 0x03
+
+#define RTW_PWR_INTF_SDIO_MSK BIT(0)
+#define RTW_PWR_INTF_USB_MSK BIT(1)
+#define RTW_PWR_INTF_PCI_MSK BIT(2)
+#define RTW_PWR_INTF_ALL_MSK (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define RTW_PWR_CUT_A_MSK BIT(1)
+#define RTW_PWR_CUT_B_MSK BIT(2)
+#define RTW_PWR_CUT_C_MSK BIT(3)
+#define RTW_PWR_CUT_D_MSK BIT(4)
+#define RTW_PWR_CUT_E_MSK BIT(5)
+#define RTW_PWR_CUT_F_MSK BIT(6)
+#define RTW_PWR_CUT_G_MSK BIT(7)
+#define RTW_PWR_CUT_ALL_MSK 0xFF
+
+enum rtw_pwr_seq_cmd_delay_unit {
+ RTW_PWR_DELAY_US,
+ RTW_PWR_DELAY_MS,
+};
+
+struct rtw_pwr_seq_cmd {
+ u16 offset;
+ u8 cut_mask;
+ u8 intf_mask;
+ u8 base:4;
+ u8 cmd:4;
+ u8 mask;
+ u8 value;
+};
+
+enum rtw_chip_ver {
+ RTW_CHIP_VER_CUT_A = 0x00,
+ RTW_CHIP_VER_CUT_B = 0x01,
+ RTW_CHIP_VER_CUT_C = 0x02,
+ RTW_CHIP_VER_CUT_D = 0x03,
+ RTW_CHIP_VER_CUT_E = 0x04,
+ RTW_CHIP_VER_CUT_F = 0x05,
+ RTW_CHIP_VER_CUT_G = 0x06,
+};
+
+#define RTW_INTF_PHY_PLATFORM_ALL 0
+
+enum rtw_intf_phy_cut {
+ RTW_INTF_PHY_CUT_A = BIT(0),
+ RTW_INTF_PHY_CUT_B = BIT(1),
+ RTW_INTF_PHY_CUT_C = BIT(2),
+ RTW_INTF_PHY_CUT_D = BIT(3),
+ RTW_INTF_PHY_CUT_E = BIT(4),
+ RTW_INTF_PHY_CUT_F = BIT(5),
+ RTW_INTF_PHY_CUT_G = BIT(6),
+ RTW_INTF_PHY_CUT_ALL = 0xFFFF,
+};
+
+enum rtw_ip_sel {
+ RTW_IP_SEL_PHY = 0,
+ RTW_IP_SEL_MAC = 1,
+ RTW_IP_SEL_DBI = 2,
+
+ RTW_IP_SEL_UNDEF = 0xFFFF
+};
+
+enum rtw_pq_map_id {
+ RTW_PQ_MAP_VO = 0x0,
+ RTW_PQ_MAP_VI = 0x1,
+ RTW_PQ_MAP_BE = 0x2,
+ RTW_PQ_MAP_BK = 0x3,
+ RTW_PQ_MAP_MG = 0x4,
+ RTW_PQ_MAP_HI = 0x5,
+ RTW_PQ_MAP_NUM = 0x6,
+
+ RTW_PQ_MAP_UNDEF,
+};
+
+enum rtw_dma_mapping {
+ RTW_DMA_MAPPING_EXTRA = 0,
+ RTW_DMA_MAPPING_LOW = 1,
+ RTW_DMA_MAPPING_NORMAL = 2,
+ RTW_DMA_MAPPING_HIGH = 3,
+
+ RTW_DMA_MAPPING_UNDEF,
+};
+
+struct rtw_rqpn {
+ enum rtw_dma_mapping dma_map_vo;
+ enum rtw_dma_mapping dma_map_vi;
+ enum rtw_dma_mapping dma_map_be;
+ enum rtw_dma_mapping dma_map_bk;
+ enum rtw_dma_mapping dma_map_mg;
+ enum rtw_dma_mapping dma_map_hi;
+};
+
+struct rtw_page_table {
+ u16 hq_num;
+ u16 nq_num;
+ u16 lq_num;
+ u16 exq_num;
+ u16 gapq_num;
+};
+
+struct rtw_intf_phy_para {
+ u16 offset;
+ u16 value;
+ u16 ip_sel;
+ u16 cut_mask;
+ u16 platform;
+};
+
+struct rtw_intf_phy_para_table {
+ struct rtw_intf_phy_para *usb2_para;
+ struct rtw_intf_phy_para *usb3_para;
+ struct rtw_intf_phy_para *gen1_para;
+ struct rtw_intf_phy_para *gen2_para;
+ u8 n_usb2_para;
+ u8 n_usb3_para;
+ u8 n_gen1_para;
+ u8 n_gen2_para;
+};
+
+struct rtw_table {
+ const void *data;
+ const u32 size;
+ void (*parse)(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+ void (*do_cfg)(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+ enum rtw_rf_path rf_path;
+};
+
+static inline void rtw_load_table(struct rtw_dev *rtwdev,
+ const struct rtw_table *tbl)
+{
+ (*tbl->parse)(rtwdev, tbl);
+}
+
+enum rtw_rfe_fem {
+ RTW_RFE_IFEM,
+ RTW_RFE_EFEM,
+ RTW_RFE_IFEM2G_EFEM5G,
+ RTW_RFE_NUM,
+};
+
+struct rtw_rfe_def {
+ const struct rtw_table *phy_pg_tbl;
+ const struct rtw_table *txpwr_lmt_tbl;
+};
+
+#define RTW_DEF_RFE(chip, bb_pg, pwrlmt) { \
+ .phy_pg_tbl = &rtw ## chip ## _bb_pg_type ## bb_pg ## _tbl, \
+ .txpwr_lmt_tbl = &rtw ## chip ## _txpwr_lmt_type ## pwrlmt ## _tbl, \
+ }
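+
+/* e.g. RTW_DEF_RFE(8822b, 2, 5) would reference rtw8822b_bb_pg_type2_tbl and
+ * rtw8822b_txpwr_lmt_type5_tbl, provided the chip code defines those tables
+ */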
+
+/* hardware configuration for each IC */
+struct rtw_chip_info {
+ struct rtw_chip_ops *ops;
+ u8 id;
+
+ const char *fw_name;
+ u8 tx_pkt_desc_sz;
+ u8 tx_buf_desc_sz;
+ u8 rx_pkt_desc_sz;
+ u8 rx_buf_desc_sz;
+ u32 phy_efuse_size;
+ u32 log_efuse_size;
+ u32 ptct_efuse_size;
+ u32 txff_size;
+ u32 rxff_size;
+ u8 band;
+ u8 page_size;
+ u8 csi_buf_pg_num;
+ u8 dig_max;
+ u8 dig_min;
+ u8 txgi_factor;
+ bool is_pwr_by_rate_dec;
+ u8 max_power_index;
+
+ bool ht_supported;
+ bool vht_supported;
+
+ /* init values */
+ u8 sys_func_en;
+ struct rtw_pwr_seq_cmd **pwr_on_seq;
+ struct rtw_pwr_seq_cmd **pwr_off_seq;
+ struct rtw_rqpn *rqpn_table;
+ struct rtw_page_table *page_table;
+ struct rtw_intf_phy_para_table *intf_table;
+
+ struct rtw_hw_reg *dig;
+ u32 rf_base_addr[2];
+ u32 rf_sipi_addr[2];
+
+ const struct rtw_table *mac_tbl;
+ const struct rtw_table *agc_tbl;
+ const struct rtw_table *bb_tbl;
+ const struct rtw_table *rf_tbl[RTW_RF_PATH_MAX];
+ const struct rtw_table *rfk_init_tbl;
+
+ const struct rtw_rfe_def *rfe_defs;
+ u32 rfe_defs_size;
+};
+
+struct rtw_dm_info {
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 total_fa_cnt;
+ u8 min_rssi;
+ u8 pre_min_rssi;
+ u16 fa_history[4];
+ u8 igi_history[4];
+ u8 igi_bitmap;
+ bool damping;
+ u8 damping_cnt;
+ u8 damping_rssi;
+
+ u8 cck_gi_u_bnd;
+ u8 cck_gi_l_bnd;
+};
+
+struct rtw_efuse {
+ u32 size;
+ u32 physical_size;
+ u32 logical_size;
+ u32 protect_size;
+
+ u8 addr[ETH_ALEN];
+ u8 channel_plan;
+ u8 country_code[2];
+ u8 rfe_option;
+ u8 thermal_meter;
+ u8 crystal_cap;
+ u8 ant_div_cfg;
+ u8 ant_div_type;
+ u8 regd;
+
+ u8 lna_type_2g;
+ u8 lna_type_5g;
+ u8 glna_type;
+ u8 alna_type;
+ bool ext_lna_2g;
+ bool ext_lna_5g;
+ u8 pa_type_2g;
+ u8 pa_type_5g;
+ u8 gpa_type;
+ u8 apa_type;
+ bool ext_pa_2g;
+ bool ext_pa_5g;
+
+ bool btcoex;
+ /* bt shares the antenna with wifi */
+ bool share_ant;
+ u8 bt_setting;
+
+ struct {
+ u8 hci;
+ u8 bw;
+ u8 ptcl;
+ u8 nss;
+ u8 ant_num;
+ } hw_cap;
+
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+};
+
+struct rtw_phy_cond {
+#ifdef __LITTLE_ENDIAN
+ u32 rfe:8;
+ u32 intf:4;
+ u32 pkg:4;
+ u32 plat:4;
+ u32 intf_rsvd:4;
+ u32 cut:4;
+ u32 branch:2;
+ u32 neg:1;
+ u32 pos:1;
+#else
+ u32 pos:1;
+ u32 neg:1;
+ u32 branch:2;
+ u32 cut:4;
+ u32 intf_rsvd:4;
+ u32 plat:4;
+ u32 pkg:4;
+ u32 intf:4;
+ u32 rfe:8;
+#endif
+ /* for intf:4 */
+ #define INTF_PCIE BIT(0)
+ #define INTF_USB BIT(1)
+ #define INTF_SDIO BIT(2)
+ /* for branch:2 */
+ #define BRANCH_IF 0
+ #define BRANCH_ELIF 1
+ #define BRANCH_ELSE 2
+ #define BRANCH_ENDIF 3
+};
+
+struct rtw_fifo_conf {
+ /* tx fifo information */
+ u16 rsvd_boundary;
+ u16 rsvd_pg_num;
+ u16 rsvd_drv_pg_num;
+ u16 txff_pg_num;
+ u16 acq_pg_num;
+ u16 rsvd_drv_addr;
+ u16 rsvd_h2c_info_addr;
+ u16 rsvd_h2c_sta_info_addr;
+ u16 rsvd_h2cq_addr;
+ u16 rsvd_cpu_instr_addr;
+ u16 rsvd_fw_txbuf_addr;
+ u16 rsvd_csibuf_addr;
+ enum rtw_dma_mapping pq_map[RTW_PQ_MAP_NUM];
+};
+
+struct rtw_fw_state {
+ const struct firmware *firmware;
+ struct completion completion;
+ u16 version;
+ u8 sub_version;
+ u8 sub_index;
+ u16 h2c_version;
+};
+
+struct rtw_hal {
+ u32 rcr;
+
+ u32 chip_version;
+ u8 fab_version;
+ u8 cut_version;
+ u8 mp_chip;
+ u8 oem_id;
+ struct rtw_phy_cond phy_cond;
+
+ u8 ps_mode;
+ u8 current_channel;
+ u8 current_band_width;
+ u8 current_band_type;
+ u8 sec_ch_offset;
+ u8 rf_type;
+ u8 rf_path_num;
+ u8 antenna_tx;
+ u8 antenna_rx;
+
+ /* protect tx power section */
+ struct mutex tx_power_mutex;
+ s8 tx_pwr_by_rate_offset_2g[RTW_RF_PATH_MAX]
+ [DESC_RATE_MAX];
+ s8 tx_pwr_by_rate_offset_5g[RTW_RF_PATH_MAX]
+ [DESC_RATE_MAX];
+ s8 tx_pwr_by_rate_base_2g[RTW_RF_PATH_MAX]
+ [RTW_RATE_SECTION_MAX];
+ s8 tx_pwr_by_rate_base_5g[RTW_RF_PATH_MAX]
+ [RTW_RATE_SECTION_MAX];
+ s8 tx_pwr_limit_2g[RTW_REGD_MAX]
+ [RTW_CHANNEL_WIDTH_MAX]
+ [RTW_RATE_SECTION_MAX]
+ [RTW_MAX_CHANNEL_NUM_2G];
+ s8 tx_pwr_limit_5g[RTW_REGD_MAX]
+ [RTW_CHANNEL_WIDTH_MAX]
+ [RTW_RATE_SECTION_MAX]
+ [RTW_MAX_CHANNEL_NUM_5G];
+ s8 tx_pwr_tbl[RTW_RF_PATH_MAX]
+ [DESC_RATE_MAX];
+};
+
+struct rtw_dev {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ struct rtw_hci hci;
+
+ struct rtw_chip_info *chip;
+ struct rtw_hal hal;
+ struct rtw_fifo_conf fifo;
+ struct rtw_fw_state fw;
+ struct rtw_efuse efuse;
+ struct rtw_sec_desc sec;
+ struct rtw_traffic_stats stats;
+ struct rtw_regulatory regd;
+
+ struct rtw_dm_info dm_info;
+
+ /* ensures exclusive access from mac80211 callbacks */
+ struct mutex mutex;
+
+ /* lock for dm to use */
+ spinlock_t dm_lock;
+
+ /* read/write rf register */
+ spinlock_t rf_lock;
+
+ /* watch dog every 2 sec */
+ struct delayed_work watch_dog_work;
+ u32 watch_dog_cnt;
+
+ struct list_head rsvd_page_list;
+
+ /* c2h cmd queue & handler work */
+ struct sk_buff_head c2h_queue;
+ struct work_struct c2h_work;
+
+ struct rtw_tx_report tx_report;
+
+ struct {
+ /* indicate the mailbox to use with fw */
+ u8 last_box_num;
+ /* protect h2c commands sent to fw */
+ spinlock_t lock;
+ u32 seq;
+ } h2c;
+
+ /* lps power state & handler work */
+ struct rtw_lps_conf lps_conf;
+ struct delayed_work lps_work;
+
+ struct dentry *debugfs;
+
+ u8 sta_cnt;
+
+ DECLARE_BITMAP(mac_id_map, RTW_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(flags, NUM_OF_RTW_FLAGS);
+
+ u8 mp_mode;
+
+ /* hci related data, must be last */
+ u8 priv[0] __aligned(sizeof(void *));
+};
+
+#include "hci.h"
+
+static inline bool rtw_flag_check(struct rtw_dev *rtwdev, enum rtw_flags flag)
+{
+ return test_bit(flag, rtwdev->flags);
+}
+
+static inline void rtw_flag_clear(struct rtw_dev *rtwdev, enum rtw_flags flag)
+{
+ clear_bit(flag, rtwdev->flags);
+}
+
+static inline void rtw_flag_set(struct rtw_dev *rtwdev, enum rtw_flags flag)
+{
+ set_bit(flag, rtwdev->flags);
+}
+
+void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
+ struct rtw_channel_params *ch_param);
+bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
+bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val);
+bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value);
+void rtw_restore_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp, u32 num);
+void rtw_set_channel(struct rtw_dev *rtwdev);
+void rtw_vif_port_config(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif,
+ u32 config);
+void rtw_tx_report_purge_timer(struct timer_list *t);
+void rtw_update_sta_info(struct rtw_dev *rtwdev, struct rtw_sta_info *si);
+int rtw_core_start(struct rtw_dev *rtwdev);
+void rtw_core_stop(struct rtw_dev *rtwdev);
+int rtw_chip_info_setup(struct rtw_dev *rtwdev);
+int rtw_core_init(struct rtw_dev *rtwdev);
+void rtw_core_deinit(struct rtw_dev *rtwdev);
+int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
new file mode 100644
index 000000000000..cfe05ba7280d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include "main.h"
+#include "pci.h"
+#include "tx.h"
+#include "rx.h"
+#include "debug.h"
+
+static u32 rtw_pci_tx_queue_idx_addr[] = {
+ [RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
+ [RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
+ [RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
+ [RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
+ [RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
+ [RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
+ [RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
+};
+
+static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
+{
+ switch (queue) {
+ case RTW_TX_QUEUE_BCN:
+ return TX_DESC_QSEL_BEACON;
+ case RTW_TX_QUEUE_H2C:
+ return TX_DESC_QSEL_H2C;
+ case RTW_TX_QUEUE_MGMT:
+ return TX_DESC_QSEL_MGMT;
+ case RTW_TX_QUEUE_HI0:
+ return TX_DESC_QSEL_HIGH;
+ default:
+ return skb->priority;
+ }
+}
+
+static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ return readb(rtwpci->mmap + addr);
+}
+
+static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ return readw(rtwpci->mmap + addr);
+}
+
+static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ return readl(rtwpci->mmap + addr);
+}
+
+static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ writeb(val, rtwpci->mmap + addr);
+}
+
+static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ writew(val, rtwpci->mmap + addr);
+}
+
+static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ writel(val, rtwpci->mmap + addr);
+}
+
+static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
+{
+ int offset = tx_ring->r.desc_size * idx;
+
+ return tx_ring->r.head + offset;
+}
+
+static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_tx_ring *tx_ring)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct rtw_pci_tx_data *tx_data;
+ struct sk_buff *skb, *tmp;
+ dma_addr_t dma;
+ u8 *head = tx_ring->r.head;
+ u32 len = tx_ring->r.len;
+ int ring_sz = len * tx_ring->r.desc_size;
+
+ /* free every skb remaining in the tx queue */
+ skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
+ __skb_unlink(skb, &tx_ring->queue);
+ tx_data = rtw_pci_get_tx_data(skb);
+ dma = tx_data->dma;
+
+ pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* free the ring itself */
+ pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
+ tx_ring->r.head = NULL;
+}
+
+static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u8 *head = rx_ring->r.head;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+ int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
+ int i;
+
+ for (i = 0; i < rx_ring->r.len; i++) {
+ skb = rx_ring->buf[i];
+ if (!skb)
+ continue;
+
+ dma = *((dma_addr_t *)skb->cb);
+ pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb(skb);
+ rx_ring->buf[i] = NULL;
+ }
+
+ pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
+}
+
+static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ struct rtw_pci_rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ rtw_pci_free_tx_ring(rtwdev, tx_ring);
+ }
+
+ for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ rtw_pci_free_rx_ring(rtwdev, rx_ring);
+ }
+}
+
+static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_tx_ring *tx_ring,
+ u8 desc_size, u32 len)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ int ring_sz = desc_size * len;
+ dma_addr_t dma;
+ u8 *head;
+
+ head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ if (!head) {
+ rtw_err(rtwdev, "failed to allocate tx ring\n");
+ return -ENOMEM;
+ }
+
+ skb_queue_head_init(&tx_ring->queue);
+ tx_ring->r.head = head;
+ tx_ring->r.dma = dma;
+ tx_ring->r.len = len;
+ tx_ring->r.desc_size = desc_size;
+ tx_ring->r.wp = 0;
+ tx_ring->r.rp = 0;
+
+ return 0;
+}
+
+static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
+ struct rtw_pci_rx_ring *rx_ring,
+ u32 idx, u32 desc_sz)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct rtw_pci_rx_buffer_desc *buf_desc;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+ dma_addr_t dma;
+
+ if (!skb)
+ return -EINVAL;
+
+ dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(pdev, dma))
+ return -EBUSY;
+
+ *((dma_addr_t *)skb->cb) = dma;
+ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+ idx * desc_sz);
+ memset(buf_desc, 0, sizeof(*buf_desc));
+ buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
+ buf_desc->dma = cpu_to_le32(dma);
+
+ return 0;
+}
+
+static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring,
+ u8 desc_size, u32 len)
+{
+ struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
+ struct sk_buff *skb = NULL;
+ dma_addr_t dma;
+ u8 *head;
+ int ring_sz = desc_size * len;
+ int buf_sz = RTK_PCI_RX_BUF_SIZE;
+ int i, allocated;
+ int ret = 0;
+
+ head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ if (!head) {
+ rtw_err(rtwdev, "failed to allocate rx ring\n");
+ return -ENOMEM;
+ }
+ rx_ring->r.head = head;
+
+ for (i = 0; i < len; i++) {
+ skb = dev_alloc_skb(buf_sz);
+ if (!skb) {
+ allocated = i;
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ memset(skb->data, 0, buf_sz);
+ rx_ring->buf[i] = skb;
+ ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
+ if (ret) {
+ allocated = i;
+ dev_kfree_skb_any(skb);
+ goto err_out;
+ }
+ }
+
+ rx_ring->r.dma = dma;
+ rx_ring->r.len = len;
+ rx_ring->r.desc_size = desc_size;
+ rx_ring->r.wp = 0;
+ rx_ring->r.rp = 0;
+
+ return 0;
+
+err_out:
+ for (i = 0; i < allocated; i++) {
+ skb = rx_ring->buf[i];
+ if (!skb)
+ continue;
+ dma = *((dma_addr_t *)skb->cb);
+ pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ rx_ring->buf[i] = NULL;
+ }
+ pci_free_consistent(pdev, ring_sz, head, dma);
+
+ rtw_err(rtwdev, "failed to init rx buffer\n");
+
+ return ret;
+}
+
+static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *tx_ring;
+ struct rtw_pci_rx_ring *rx_ring;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
+ int tx_desc_size, rx_desc_size;
+ u32 len;
+ int ret;
+
+ tx_desc_size = chip->tx_buf_desc_sz;
+
+ for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ len = max_num_of_tx_queue(i);
+ ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
+ if (ret)
+ goto out;
+ }
+
+ rx_desc_size = chip->rx_buf_desc_sz;
+
+ for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
+ rx_ring = &rtwpci->rx_rings[j];
+ ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
+ RTK_MAX_RX_DESC_NUM);
+ if (ret)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ tx_alloced = i;
+ for (i = 0; i < tx_alloced; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ rtw_pci_free_tx_ring(rtwdev, tx_ring);
+ }
+
+ rx_alloced = j;
+ for (j = 0; j < rx_alloced; j++) {
+ rx_ring = &rtwpci->rx_rings[j];
+ rtw_pci_free_rx_ring(rtwdev, rx_ring);
+ }
+
+ return ret;
+}
+
+static void rtw_pci_deinit(struct rtw_dev *rtwdev)
+{
+ rtw_pci_free_trx_ring(rtwdev);
+}
+
+static int rtw_pci_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ int ret = 0;
+
+ rtwpci->irq_mask[0] = IMR_HIGHDOK |
+ IMR_MGNTDOK |
+ IMR_BKDOK |
+ IMR_BEDOK |
+ IMR_VIDOK |
+ IMR_VODOK |
+ IMR_ROK |
+ IMR_BCNDMAINT_E |
+ 0;
+ rtwpci->irq_mask[1] = IMR_TXFOVW |
+ 0;
+ rtwpci->irq_mask[3] = IMR_H2CDOK |
+ 0;
+ spin_lock_init(&rtwpci->irq_lock);
+ ret = rtw_pci_init_trx_ring(rtwdev);
+
+ return ret;
+}
+
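+/* Program the DMA base address and length of every TX/RX buffer descriptor
+ * ring into the PCI registers and clear the host/hardware read and write
+ * pointers.
+ */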
+static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ u32 len;
+ u8 tmp;
+ dma_addr_t dma;
+
+ tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
+ rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);
+
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);
+
+ len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
+ dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
+ rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
+ rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len);
+ rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);
+
+ len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
+ dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
+ rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
+ rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
+ rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & 0xfff);
+ rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);
+
+ /* reset read/write pointers */
+ rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);
+
+ /* reset H2C queue index */
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HOST_IDX);
+ rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR, BIT_CLR_H2CQ_HW_IDX);
+}
+
+static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
+{
+ rtw_pci_reset_buf_desc(rtwdev);
+}
+
+static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci)
+{
+ rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0]);
+ rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);
+ rtwpci->irq_enabled = true;
+}
+
+static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci)
+{
+ rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
+ rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
+ rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);
+ rtwpci->irq_enabled = false;
+}
+
+static int rtw_pci_setup(struct rtw_dev *rtwdev)
+{
+ rtw_pci_reset_trx_ring(rtwdev);
+
+ return 0;
+}
+
+static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
+{
+ /* reset dma and rx tag */
+ rtw_write32_set(rtwdev, RTK_PCI_CTRL,
+ BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
+ rtwpci->rx_tag = 0;
+}
+
+static int rtw_pci_start(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ rtw_pci_dma_reset(rtwdev, rtwpci);
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw_pci_enable_interrupt(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+
+ return 0;
+}
+
+static void rtw_pci_stop(struct rtw_dev *rtwdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static u8 ac_to_hwq[] = {
+ [0] = RTW_TX_QUEUE_VO,
+ [1] = RTW_TX_QUEUE_VI,
+ [2] = RTW_TX_QUEUE_BE,
+ [3] = RTW_TX_QUEUE_BK,
+};
+
+static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u8 q_mapping = skb_get_queue_mapping(skb);
+ u8 queue;
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ queue = RTW_TX_QUEUE_BCN;
+ else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
+ queue = RTW_TX_QUEUE_MGMT;
+ else
+ queue = ac_to_hwq[q_mapping];
+
+ return queue;
+}
+
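+/* The beacon queue keeps at most one reserved page queued: unmap and free
+ * the previously queued skb before a new reserved page is transmitted.
+ */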
+static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
+ struct rtw_pci_tx_ring *ring)
+{
+ struct sk_buff *prev = skb_dequeue(&ring->queue);
+ struct rtw_pci_tx_data *tx_data;
+ dma_addr_t dma;
+
+ if (!prev)
+ return;
+
+ tx_data = rtw_pci_get_tx_data(prev);
+ dma = tx_data->dma;
+ pci_unmap_single(rtwpci->pdev, dma, prev->len,
+ PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(prev);
+}
+
+static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
+ struct rtw_pci_rx_ring *rx_ring,
+ u32 idx)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci_rx_buffer_desc *buf_desc;
+ u32 desc_sz = chip->rx_buf_desc_sz;
+ u16 total_pkt_size;
+
+ buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
+ idx * desc_sz);
+ total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);
+
+ /* rx tag mismatch, throw a warning */
+ if (total_pkt_size != rtwpci->rx_tag)
+ rtw_warn(rtwdev, "pci bus timeout, check dma status\n");
+
+ rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
+}
+
+static int rtw_pci_xmit(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb, u8 queue)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci_tx_ring *ring;
+ struct rtw_pci_tx_data *tx_data;
+ dma_addr_t dma;
+ u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
+ u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
+ u32 size;
+ u32 psb_len;
+ u8 *pkt_desc;
+ struct rtw_pci_tx_buffer_desc *buf_desc;
+ u32 bd_idx;
+
+ ring = &rtwpci->tx_rings[queue];
+
+ size = skb->len;
+
+ if (queue == RTW_TX_QUEUE_BCN)
+ rtw_pci_release_rsvd_page(rtwpci, ring);
+ else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
+ return -ENOSPC;
+
+ pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
+ memset(pkt_desc, 0, tx_pkt_desc_sz);
+ pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
+ rtw_tx_fill_tx_desc(pkt_info, skb);
+ dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(rtwpci->pdev, dma))
+ return -EBUSY;
+
+	/* after this point the skb is DMA-mapped; there is no way back */
+ buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
+ memset(buf_desc, 0, tx_buf_desc_sz);
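+	/* psb_len holds the packet length in 128-byte units; the BCN queue
+	 * additionally sets the OWN bit (bit 15) in the same field
+	 */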
+ psb_len = (skb->len - 1) / 128 + 1;
+ if (queue == RTW_TX_QUEUE_BCN)
+ psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;
+
+ buf_desc[0].psb_len = cpu_to_le16(psb_len);
+ buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
+ buf_desc[0].dma = cpu_to_le32(dma);
+ buf_desc[1].buf_size = cpu_to_le16(size);
+ buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);
+
+ tx_data = rtw_pci_get_tx_data(skb);
+ tx_data->dma = dma;
+ tx_data->sn = pkt_info->sn;
+ skb_queue_tail(&ring->queue, skb);
+
+ /* kick off tx queue */
+ if (queue != RTW_TX_QUEUE_BCN) {
+ if (++ring->r.wp >= ring->r.len)
+ ring->r.wp = 0;
+ bd_idx = rtw_pci_tx_queue_idx_addr[queue];
+ rtw_write16(rtwdev, bd_idx, ring->r.wp & 0xfff);
+ } else {
+ u32 reg_bcn_work;
+
+ reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
+ reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
+ rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);
+ }
+
+ return 0;
+}
+
+static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
+ u32 size)
+{
+ struct sk_buff *skb;
+ struct rtw_tx_pkt_info pkt_info;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ memcpy((u8 *)skb_put(skb, size), buf, size);
+ memset(&pkt_info, 0, sizeof(pkt_info));
+ pkt_info.tx_pkt_size = size;
+ pkt_info.offset = tx_pkt_desc_sz;
+
+ return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
+}
+
+static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
+{
+ struct sk_buff *skb;
+ struct rtw_tx_pkt_info pkt_info;
+ u32 tx_pkt_desc_sz;
+ u32 length;
+
+ tx_pkt_desc_sz = rtwdev->chip->tx_pkt_desc_sz;
+ length = size + tx_pkt_desc_sz;
+ skb = dev_alloc_skb(length);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, tx_pkt_desc_sz);
+ memcpy((u8 *)skb_put(skb, size), buf, size);
+ memset(&pkt_info, 0, sizeof(pkt_info));
+ pkt_info.tx_pkt_size = size;
+
+ return rtw_pci_xmit(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
+}
+
+static int rtw_pci_tx(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ struct rtw_pci_tx_ring *ring;
+ u8 queue = rtw_hw_queue_mapping(skb);
+ int ret;
+
+ ret = rtw_pci_xmit(rtwdev, pkt_info, skb, queue);
+ if (ret)
+ return ret;
+
+ ring = &rtwpci->tx_rings[queue];
+ if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
+ ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
+ ring->queue_stopped = true;
+ }
+
+ return 0;
+}
+
+static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ u8 hw_queue)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct ieee80211_tx_info *info;
+ struct rtw_pci_tx_ring *ring;
+ struct rtw_pci_tx_data *tx_data;
+ struct sk_buff *skb;
+ u32 count;
+ u32 bd_idx_addr;
+ u32 bd_idx, cur_rp;
+ u16 q_map;
+
+ ring = &rtwpci->tx_rings[hw_queue];
+
+ bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
+ bd_idx = rtw_read32(rtwdev, bd_idx_addr);
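+	/* hardware read pointer is reported in bits [27:16] of the register */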
+ cur_rp = bd_idx >> 16;
+ cur_rp &= 0xfff;
+ if (cur_rp >= ring->r.rp)
+ count = cur_rp - ring->r.rp;
+ else
+ count = ring->r.len - (ring->r.rp - cur_rp);
+
+ while (count--) {
+ skb = skb_dequeue(&ring->queue);
+ tx_data = rtw_pci_get_tx_data(skb);
+ pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
+ PCI_DMA_TODEVICE);
+
+ /* just free command packets from host to card */
+ if (hw_queue == RTW_TX_QUEUE_H2C) {
+ dev_kfree_skb_irq(skb);
+ continue;
+ }
+
+ if (ring->queue_stopped &&
+ avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) {
+ q_map = skb_get_queue_mapping(skb);
+ ieee80211_wake_queue(hw, q_map);
+ ring->queue_stopped = false;
+ }
+
+ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
+
+ info = IEEE80211_SKB_CB(skb);
+
+ /* enqueue to wait for tx report */
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
+ continue;
+ }
+
+		/* always report ACK for the others so they are not marked as dropped */
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_info_clear_status(info);
+ ieee80211_tx_status_irqsafe(hw, skb);
+ }
+
+ ring->r.rp = cur_rp;
+}
+
+static void rtw_pci_rx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
+ u8 hw_queue)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_pci_rx_ring *ring;
+ struct rtw_rx_pkt_stat pkt_stat;
+ struct ieee80211_rx_status rx_status;
+ struct sk_buff *skb, *new;
+ u32 cur_wp, cur_rp, tmp;
+ u32 count;
+ u32 pkt_offset;
+ u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
+ u32 buf_desc_sz = chip->rx_buf_desc_sz;
+ u8 *rx_desc;
+ dma_addr_t dma;
+
+ ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
+
+ tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
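+	/* hardware write pointer is reported in bits [27:16] of the register */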
+ cur_wp = tmp >> 16;
+ cur_wp &= 0xfff;
+ if (cur_wp >= ring->r.wp)
+ count = cur_wp - ring->r.wp;
+ else
+ count = ring->r.len - (ring->r.wp - cur_wp);
+
+ cur_rp = ring->r.rp;
+ while (count--) {
+ rtw_pci_dma_check(rtwdev, ring, cur_rp);
+ skb = ring->buf[cur_rp];
+ dma = *((dma_addr_t *)skb->cb);
+ pci_unmap_single(rtwpci->pdev, dma, RTK_PCI_RX_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ rx_desc = skb->data;
+ chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);
+
+ /* offset from rx_desc to payload */
+ pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
+ pkt_stat.shift;
+
+ if (pkt_stat.is_c2h) {
+ /* keep rx_desc, halmac needs it */
+ skb_put(skb, pkt_stat.pkt_len + pkt_offset);
+
+ /* pass offset for further operation */
+ *((u32 *)skb->cb) = pkt_offset;
+ skb_queue_tail(&rtwdev->c2h_queue, skb);
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+ } else {
+ /* remove rx_desc, maybe use skb_pull? */
+ skb_put(skb, pkt_stat.pkt_len);
+ skb_reserve(skb, pkt_offset);
+
+			/* alloc a smaller skb to hand to mac80211 */
+ new = dev_alloc_skb(pkt_stat.pkt_len);
+ if (!new) {
+ new = skb;
+ } else {
+ skb_put_data(new, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+ /* TODO: merge into rx.c */
+ rtw_rx_stats(rtwdev, pkt_stat.vif, skb);
+ memcpy(new->cb, &rx_status, sizeof(rx_status));
+ ieee80211_rx_irqsafe(rtwdev->hw, new);
+ }
+
+ /* skb delivered to mac80211, alloc a new one in rx ring */
+ new = dev_alloc_skb(RTK_PCI_RX_BUF_SIZE);
+ if (WARN(!new, "rx routine starvation\n"))
+ return;
+
+ ring->buf[cur_rp] = new;
+ rtw_pci_reset_rx_desc(rtwdev, new, ring, cur_rp, buf_desc_sz);
+
+		/* host reads the next element in the ring */
+ if (++cur_rp >= ring->r.len)
+ cur_rp = 0;
+ }
+
+ ring->r.rp = cur_rp;
+ ring->r.wp = cur_wp;
+ rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);
+}
+
+static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
+ struct rtw_pci *rtwpci, u32 *irq_status)
+{
+ irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
+ irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
+ irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
+ irq_status[0] &= rtwpci->irq_mask[0];
+ irq_status[1] &= rtwpci->irq_mask[1];
+ irq_status[3] &= rtwpci->irq_mask[3];
+ rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
+ rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
+ rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);
+}
+
+static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
+{
+ struct rtw_dev *rtwdev = dev;
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ u32 irq_status[4];
+
+ spin_lock(&rtwpci->irq_lock);
+ if (!rtwpci->irq_enabled)
+ goto out;
+
+ rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);
+
+ if (irq_status[0] & IMR_MGNTDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
+ if (irq_status[0] & IMR_HIGHDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
+ if (irq_status[0] & IMR_BEDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
+ if (irq_status[0] & IMR_BKDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
+ if (irq_status[0] & IMR_VODOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
+ if (irq_status[0] & IMR_VIDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
+ if (irq_status[3] & IMR_H2CDOK)
+ rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
+ if (irq_status[0] & IMR_ROK)
+ rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
+
+out:
+ spin_unlock(&rtwpci->irq_lock);
+
+ return IRQ_HANDLED;
+}
+
+static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+ unsigned long len;
+ u8 bar_id = 2;
+ int ret;
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ rtw_err(rtwdev, "failed to request pci regions\n");
+ return ret;
+ }
+
+ len = pci_resource_len(pdev, bar_id);
+ rtwpci->mmap = pci_iomap(pdev, bar_id, len);
+ if (!rtwpci->mmap) {
+ rtw_err(rtwdev, "failed to map pci memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ if (rtwpci->mmap) {
+ pci_iounmap(pdev, rtwpci->mmap);
+ pci_release_regions(pdev);
+ }
+}
+
+static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 write_addr;
+ u16 remainder = addr & 0x3;
+ u8 flag;
+ u8 cnt = 20;
+
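+	/* keep the dword-aligned address in bits [11:2] and set the byte-enable
+	 * bit ([15:12]) that selects the byte to write
+	 */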
+ write_addr = ((addr & 0x0ffc) | (BIT(0) << (remainder + 12)));
+ rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
+ rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
+ rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, 0x01);
+
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ while (flag && (cnt != 0)) {
+ udelay(10);
+ flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
+ cnt--;
+ }
+
+ WARN(flag, "DBI write fail\n");
+}
+
+static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
+{
+ u8 page;
+ u8 wflag;
+ u8 cnt;
+
+ rtw_write16(rtwdev, REG_MDIO_V1, data);
+
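+	/* select the MDIO page: addresses below 0x20 use the lower page,
+	 * gen1 uses pages 0/1 and gen2 uses pages 2/3
+	 */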
+ page = addr < 0x20 ? 0 : 1;
+ page += g1 ? 0 : 2;
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & 0x1f);
+ rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
+
+ rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);
+ wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1);
+
+ cnt = 20;
+ while (wflag && (cnt != 0)) {
+ udelay(10);
+ wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
+ BIT_MDIO_WFLAG_V1);
+ cnt--;
+ }
+
+ WARN(wflag, "MDIO write fail\n");
+}
+
+static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_intf_phy_para *para;
+ u16 cut;
+ u16 value;
+ u16 offset;
+ u16 ip_sel;
+ int i;
+
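+	/* turn the cut version into a single bit so it can be matched
+	 * against the cut_mask of each parameter entry
+	 */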
+ cut = BIT(0) << rtwdev->hal.cut_version;
+
+ for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
+ para = &chip->intf_table->gen1_para[i];
+ if (!(para->cut_mask & cut))
+ continue;
+ if (para->offset == 0xffff)
+ break;
+ offset = para->offset;
+ value = para->value;
+ ip_sel = para->ip_sel;
+ if (para->ip_sel == RTW_IP_SEL_PHY)
+ rtw_mdio_write(rtwdev, offset, value, true);
+ else
+ rtw_dbi_write8(rtwdev, offset, value);
+ }
+
+ for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
+ para = &chip->intf_table->gen2_para[i];
+ if (!(para->cut_mask & cut))
+ continue;
+ if (para->offset == 0xffff)
+ break;
+ offset = para->offset;
+ value = para->value;
+ ip_sel = para->ip_sel;
+ if (para->ip_sel == RTW_IP_SEL_PHY)
+ rtw_mdio_write(rtwdev, offset, value, false);
+ else
+ rtw_dbi_write8(rtwdev, offset, value);
+ }
+}
+
+static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to enable pci device\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, rtwdev->hw);
+ SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
+
+ return 0;
+}
+
+static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ struct rtw_pci *rtwpci;
+ int ret;
+
+ rtwpci = (struct rtw_pci *)rtwdev->priv;
+ rtwpci->pdev = pdev;
+
+	/* after this the driver can access hw registers */
+ ret = rtw_pci_io_mapping(rtwdev, pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to request pci io region\n");
+ goto err_out;
+ }
+
+ ret = rtw_pci_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to allocate pci resources\n");
+ goto err_io_unmap;
+ }
+
+ rtw_pci_phy_cfg(rtwdev);
+
+ return 0;
+
+err_io_unmap:
+ rtw_pci_io_unmapping(rtwdev, pdev);
+
+err_out:
+ return ret;
+}
+
+static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
+{
+ rtw_pci_deinit(rtwdev);
+ rtw_pci_io_unmapping(rtwdev, pdev);
+}
+
+static struct rtw_hci_ops rtw_pci_ops = {
+ .tx = rtw_pci_tx,
+ .setup = rtw_pci_setup,
+ .start = rtw_pci_start,
+ .stop = rtw_pci_stop,
+
+ .read8 = rtw_pci_read8,
+ .read16 = rtw_pci_read16,
+ .read32 = rtw_pci_read32,
+ .write8 = rtw_pci_write8,
+ .write16 = rtw_pci_write16,
+ .write32 = rtw_pci_write32,
+ .write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
+ .write_data_h2c = rtw_pci_write_data_h2c,
+};
+
+static int rtw_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ieee80211_hw *hw;
+ struct rtw_dev *rtwdev;
+ int drv_data_size;
+ int ret;
+
+ drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
+ hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
+ if (!hw) {
+ dev_err(&pdev->dev, "failed to allocate hw\n");
+ return -ENOMEM;
+ }
+
+ rtwdev = hw->priv;
+ rtwdev->hw = hw;
+ rtwdev->dev = &pdev->dev;
+ rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
+ rtwdev->hci.ops = &rtw_pci_ops;
+ rtwdev->hci.type = RTW_HCI_TYPE_PCIE;
+
+ ret = rtw_core_init(rtwdev);
+ if (ret)
+ goto err_release_hw;
+
+ rtw_dbg(rtwdev, RTW_DBG_PCI,
+ "rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
+ pdev->vendor, pdev->device, pdev->revision);
+
+ ret = rtw_pci_claim(rtwdev, pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to claim pci device\n");
+ goto err_deinit_core;
+ }
+
+ ret = rtw_pci_setup_resource(rtwdev, pdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup pci resources\n");
+ goto err_pci_declaim;
+ }
+
+ ret = rtw_chip_info_setup(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup chip information\n");
+ goto err_destroy_pci;
+ }
+
+ ret = rtw_register_hw(rtwdev, hw);
+ if (ret) {
+ rtw_err(rtwdev, "failed to register hw\n");
+ goto err_destroy_pci;
+ }
+
+ ret = request_irq(pdev->irq, &rtw_pci_interrupt_handler,
+ IRQF_SHARED, KBUILD_MODNAME, rtwdev);
+ if (ret) {
+ ieee80211_unregister_hw(hw);
+ goto err_destroy_pci;
+ }
+
+ return 0;
+
+err_destroy_pci:
+ rtw_pci_destroy(rtwdev, pdev);
+
+err_pci_declaim:
+ rtw_pci_declaim(rtwdev, pdev);
+
+err_deinit_core:
+ rtw_core_deinit(rtwdev);
+
+err_release_hw:
+ ieee80211_free_hw(hw);
+
+ return ret;
+}
+
+static void rtw_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtw_dev *rtwdev;
+ struct rtw_pci *rtwpci;
+
+ if (!hw)
+ return;
+
+ rtwdev = hw->priv;
+ rtwpci = (struct rtw_pci *)rtwdev->priv;
+
+ rtw_unregister_hw(rtwdev, hw);
+ rtw_pci_disable_interrupt(rtwdev, rtwpci);
+ rtw_pci_destroy(rtwdev, pdev);
+ rtw_pci_declaim(rtwdev, pdev);
+ free_irq(rtwpci->pdev->irq, rtwdev);
+ rtw_core_deinit(rtwdev);
+ ieee80211_free_hw(hw);
+}
+
+static const struct pci_device_id rtw_pci_id_table[] = {
+#ifdef CONFIG_RTW88_8822BE
+ { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xB822, rtw8822b_hw_spec) },
+#endif
+#ifdef CONFIG_RTW88_8822CE
+ { RTK_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xC822, rtw8822c_hw_spec) },
+#endif
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw_pci_id_table);
+
+static struct pci_driver rtw_pci_driver = {
+ .name = "rtw_pci",
+ .id_table = rtw_pci_id_table,
+ .probe = rtw_pci_probe,
+ .remove = rtw_pci_remove,
+};
+module_pci_driver(rtw_pci_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
new file mode 100644
index 000000000000..87824a4caba9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTK_PCI_H_
+#define __RTK_PCI_H_
+
+#define RTK_PCI_DEVICE(vend, dev, hw_config) \
+ PCI_DEVICE(vend, dev), \
+ .driver_data = (kernel_ulong_t)&(hw_config),
+
+#define RTK_DEFAULT_TX_DESC_NUM 128
+#define RTK_BEQ_TX_DESC_NUM 256
+
+#define RTK_MAX_RX_DESC_NUM 512
+/* 8K + rx desc size */
+#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
+
+#define RTK_PCI_CTRL 0x300
+#define BIT_RST_TRXDMA_INTF BIT(20)
+#define BIT_RX_TAG_EN BIT(15)
+#define REG_DBI_WDATA_V1 0x03E8
+#define REG_DBI_FLAG_V1 0x03F0
+#define REG_MDIO_V1 0x03F4
+#define REG_PCIE_MIX_CFG 0x03F8
+#define BIT_MDIO_WFLAG_V1 BIT(5)
+
+#define BIT_PCI_BCNQ_FLAG BIT(4)
+#define RTK_PCI_TXBD_DESA_BCNQ 0x308
+#define RTK_PCI_TXBD_DESA_H2CQ 0x1320
+#define RTK_PCI_TXBD_DESA_MGMTQ 0x310
+#define RTK_PCI_TXBD_DESA_BKQ 0x330
+#define RTK_PCI_TXBD_DESA_BEQ 0x328
+#define RTK_PCI_TXBD_DESA_VIQ 0x320
+#define RTK_PCI_TXBD_DESA_VOQ 0x318
+#define RTK_PCI_TXBD_DESA_HI0Q 0x340
+#define RTK_PCI_RXBD_DESA_MPDUQ 0x338
+
+/* BCNQ is dedicated to the rsvd page; no descriptor count needs to be specified */
+#define RTK_PCI_TXBD_NUM_H2CQ 0x1328
+#define RTK_PCI_TXBD_NUM_MGMTQ 0x380
+#define RTK_PCI_TXBD_NUM_BKQ 0x38A
+#define RTK_PCI_TXBD_NUM_BEQ 0x388
+#define RTK_PCI_TXBD_NUM_VIQ 0x386
+#define RTK_PCI_TXBD_NUM_VOQ 0x384
+#define RTK_PCI_TXBD_NUM_HI0Q 0x38C
+#define RTK_PCI_RXBD_NUM_MPDUQ 0x382
+#define RTK_PCI_TXBD_IDX_H2CQ 0x132C
+#define RTK_PCI_TXBD_IDX_MGMTQ 0x3B0
+#define RTK_PCI_TXBD_IDX_BKQ 0x3AC
+#define RTK_PCI_TXBD_IDX_BEQ 0x3A8
+#define RTK_PCI_TXBD_IDX_VIQ 0x3A4
+#define RTK_PCI_TXBD_IDX_VOQ 0x3A0
+#define RTK_PCI_TXBD_IDX_HI0Q 0x3B8
+#define RTK_PCI_RXBD_IDX_MPDUQ 0x3B4
+
+#define RTK_PCI_TXBD_RWPTR_CLR 0x39C
+#define RTK_PCI_TXBD_H2CQ_CSR 0x1330
+
+#define BIT_CLR_H2CQ_HOST_IDX BIT(16)
+#define BIT_CLR_H2CQ_HW_IDX BIT(8)
+
+#define RTK_PCI_HIMR0 0x0B0
+#define RTK_PCI_HISR0 0x0B4
+#define RTK_PCI_HIMR1 0x0B8
+#define RTK_PCI_HISR1 0x0BC
+#define RTK_PCI_HIMR2 0x10B0
+#define RTK_PCI_HISR2 0x10B4
+#define RTK_PCI_HIMR3 0x10B8
+#define RTK_PCI_HISR3 0x10BC
+/* IMR 0 */
+#define IMR_TIMER2 BIT(31)
+#define IMR_TIMER1 BIT(30)
+#define IMR_PSTIMEOUT BIT(29)
+#define IMR_GTINT4 BIT(28)
+#define IMR_GTINT3 BIT(27)
+#define IMR_TBDER BIT(26)
+#define IMR_TBDOK BIT(25)
+#define IMR_TSF_BIT32_TOGGLE BIT(24)
+#define IMR_BCNDMAINT0 BIT(20)
+#define IMR_BCNDOK0 BIT(16)
+#define IMR_HSISR_IND_ON_INT BIT(15)
+#define IMR_BCNDMAINT_E BIT(14)
+#define IMR_ATIMEND BIT(12)
+#define IMR_HISR1_IND_INT BIT(11)
+#define IMR_C2HCMD BIT(10)
+#define IMR_CPWM2 BIT(9)
+#define IMR_CPWM BIT(8)
+#define IMR_HIGHDOK BIT(7)
+#define IMR_MGNTDOK BIT(6)
+#define IMR_BKDOK BIT(5)
+#define IMR_BEDOK BIT(4)
+#define IMR_VIDOK BIT(3)
+#define IMR_VODOK BIT(2)
+#define IMR_RDU BIT(1)
+#define IMR_ROK BIT(0)
+/* IMR 1 */
+#define IMR_TXFIFO_TH_INT BIT(30)
+#define IMR_BTON_STS_UPDATE BIT(29)
+#define IMR_MCUERR BIT(28)
+#define IMR_BCNDMAINT7 BIT(27)
+#define IMR_BCNDMAINT6 BIT(26)
+#define IMR_BCNDMAINT5 BIT(25)
+#define IMR_BCNDMAINT4 BIT(24)
+#define IMR_BCNDMAINT3 BIT(23)
+#define IMR_BCNDMAINT2 BIT(22)
+#define IMR_BCNDMAINT1 BIT(21)
+#define IMR_BCNDOK7 BIT(20)
+#define IMR_BCNDOK6 BIT(19)
+#define IMR_BCNDOK5 BIT(18)
+#define IMR_BCNDOK4 BIT(17)
+#define IMR_BCNDOK3 BIT(16)
+#define IMR_BCNDOK2 BIT(15)
+#define IMR_BCNDOK1 BIT(14)
+#define IMR_ATIMEND_E BIT(13)
+#define IMR_ATIMEND BIT(12)
+#define IMR_TXERR BIT(11)
+#define IMR_RXERR BIT(10)
+#define IMR_TXFOVW BIT(9)
+#define IMR_RXFOVW BIT(8)
+#define IMR_CPU_MGQ_TXDONE BIT(5)
+#define IMR_PS_TIMER_C BIT(4)
+#define IMR_PS_TIMER_B BIT(3)
+#define IMR_PS_TIMER_A BIT(2)
+#define IMR_CPUMGQ_TX_TIMER BIT(1)
+/* IMR 3 */
+#define IMR_H2CDOK BIT(16)
+
+/* one element is reserved so a full ring can be told apart from an empty one */
+static inline int avail_desc(u32 wp, u32 rp, u32 len)
+{
+ if (rp > wp)
+ return rp - wp - 1;
+ else
+ return len - wp + rp - 1;
+}
+
+#define RTK_PCI_TXBD_OWN_OFFSET 15
+#define RTK_PCI_TXBD_BCN_WORK 0x383
+
+struct rtw_pci_tx_buffer_desc {
+ __le16 buf_size;
+ __le16 psb_len;
+ __le32 dma;
+};
+
+struct rtw_pci_tx_data {
+ dma_addr_t dma;
+ u8 sn;
+};
+
+struct rtw_pci_ring {
+ u8 *head;
+ dma_addr_t dma;
+
+ u8 desc_size;
+
+ u32 len;
+ u32 wp;
+ u32 rp;
+};
+
+struct rtw_pci_tx_ring {
+ struct rtw_pci_ring r;
+ struct sk_buff_head queue;
+ bool queue_stopped;
+};
+
+struct rtw_pci_rx_buffer_desc {
+ __le16 buf_size;
+ __le16 total_pkt_size;
+ __le32 dma;
+};
+
+struct rtw_pci_rx_ring {
+ struct rtw_pci_ring r;
+ struct sk_buff *buf[RTK_MAX_RX_DESC_NUM];
+};
+
+#define RX_TAG_MAX 8192
+
+struct rtw_pci {
+ struct pci_dev *pdev;
+
+ /* used for pci interrupt */
+ spinlock_t irq_lock;
+ u32 irq_mask[4];
+ bool irq_enabled;
+
+ u16 rx_tag;
+ struct rtw_pci_tx_ring tx_rings[RTK_MAX_TX_QUEUE_NUM];
+ struct rtw_pci_rx_ring rx_rings[RTK_MAX_RX_QUEUE_NUM];
+
+ void __iomem *mmap;
+};
+
+static u32 max_num_of_tx_queue(u8 queue)
+{
+ u32 max_num;
+
+ switch (queue) {
+ case RTW_TX_QUEUE_BE:
+ max_num = RTK_BEQ_TX_DESC_NUM;
+ break;
+ case RTW_TX_QUEUE_BCN:
+ max_num = 1;
+ break;
+ default:
+ max_num = RTK_DEFAULT_TX_DESC_NUM;
+ break;
+ }
+
+ return max_num;
+}
+
+static inline struct
+rtw_pci_tx_data *rtw_pci_get_tx_data(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ BUILD_BUG_ON(sizeof(struct rtw_pci_tx_data) >
+ sizeof(info->status.status_driver_data));
+
+ return (struct rtw_pci_tx_data *)info->status.status_driver_data;
+}
+
+static inline
+struct rtw_pci_tx_buffer_desc *get_tx_buffer_desc(struct rtw_pci_tx_ring *ring,
+ u32 size)
+{
+ u8 *buf_desc;
+
+ buf_desc = ring->r.head + ring->r.wp * size;
+ return (struct rtw_pci_tx_buffer_desc *)buf_desc;
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
new file mode 100644
index 000000000000..4381b360b5b5
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -0,0 +1,1727 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include <linux/bcd.h>
+
+#include "main.h"
+#include "reg.h"
+#include "fw.h"
+#include "phy.h"
+#include "debug.h"
+
+struct phy_cfg_pair {
+ u32 addr;
+ u32 data;
+};
+
+union phy_table_tile {
+ struct rtw_phy_cond cond;
+ struct phy_cfg_pair cfg;
+};
+
+struct phy_pg_cfg_pair {
+ u32 band;
+ u32 rf_path;
+ u32 tx_num;
+ u32 addr;
+ u32 bitmask;
+ u32 data;
+};
+
+struct txpwr_lmt_cfg_pair {
+ u8 regd;
+ u8 band;
+ u8 bw;
+ u8 rs;
+ u8 ch;
+ s8 txpwr_lmt;
+};
+
+static const u32 db_invert_table[12][8] = {
+ {10, 13, 16, 20,
+ 25, 32, 40, 50},
+ {64, 80, 101, 128,
+ 160, 201, 256, 318},
+ {401, 505, 635, 800,
+ 1007, 1268, 1596, 2010},
+ {316, 398, 501, 631,
+ 794, 1000, 1259, 1585},
+ {1995, 2512, 3162, 3981,
+ 5012, 6310, 7943, 10000},
+ {12589, 15849, 19953, 25119,
+ 31623, 39811, 50119, 63098},
+ {79433, 100000, 125893, 158489,
+ 199526, 251189, 316228, 398107},
+ {501187, 630957, 794328, 1000000,
+ 1258925, 1584893, 1995262, 2511886},
+ {3162278, 3981072, 5011872, 6309573,
+	 7943282, 10000000, 12589254, 15848932},
+ {19952623, 25118864, 31622777, 39810717,
+ 50118723, 63095734, 79432823, 100000000},
+ {125892541, 158489319, 199526232, 251188643,
+ 316227766, 398107171, 501187234, 630957345},
+ {794328235, 1000000000, 1258925412, 1584893192,
+ 1995262315, 2511886432U, 3162277660U, 3981071706U}
+};
+
+enum rtw_phy_band_type {
+ PHY_BAND_2G = 0,
+ PHY_BAND_5G = 1,
+};
+
+void rtw_phy_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 addr, mask;
+
+ dm_info->fa_history[3] = 0;
+ dm_info->fa_history[2] = 0;
+ dm_info->fa_history[1] = 0;
+ dm_info->fa_history[0] = 0;
+ dm_info->igi_bitmap = 0;
+ dm_info->igi_history[3] = 0;
+ dm_info->igi_history[2] = 0;
+ dm_info->igi_history[1] = 0;
+
+ addr = chip->dig[0].addr;
+ mask = chip->dig[0].mask;
+ dm_info->igi_history[0] = rtw_read32_mask(rtwdev, addr, mask);
+}
+
+void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 addr, mask;
+ u8 path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ addr = chip->dig[path].addr;
+ mask = chip->dig[path].mask;
+ rtw_write32_mask(rtwdev, addr, mask, igi);
+ }
+}
+
+static void rtw_phy_stat_false_alarm(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ chip->ops->false_alarm_statistics(rtwdev);
+}
+
+#define RA_FLOOR_TABLE_SIZE 7
+#define RA_FLOOR_UP_GAP 3
+
+static u8 rtw_phy_get_rssi_level(u8 old_level, u8 rssi)
+{
+ u8 table[RA_FLOOR_TABLE_SIZE] = {20, 34, 38, 42, 46, 50, 100};
+ u8 new_level = 0;
+ int i;
+
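+	/* raise the thresholds at or above the current level to add hysteresis */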
+ for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++)
+ if (i >= old_level)
+ table[i] += RA_FLOOR_UP_GAP;
+
+ for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
+ if (rssi < table[i]) {
+ new_level = i;
+ break;
+ }
+ }
+
+ return new_level;
+}
+
+struct rtw_phy_stat_iter_data {
+ struct rtw_dev *rtwdev;
+ u8 min_rssi;
+};
+
+static void rtw_phy_stat_rssi_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_phy_stat_iter_data *iter_data = data;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ u8 rssi, rssi_level;
+
+ rssi = ewma_rssi_read(&si->avg_rssi);
+ rssi_level = rtw_phy_get_rssi_level(si->rssi_level, rssi);
+
+ rtw_fw_send_rssi_info(rtwdev, si);
+
+ iter_data->min_rssi = min_t(u8, rssi, iter_data->min_rssi);
+}
+
+static void rtw_phy_stat_rssi(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_phy_stat_iter_data data = {};
+
+ data.rtwdev = rtwdev;
+ data.min_rssi = U8_MAX;
+ rtw_iterate_stas_atomic(rtwdev, rtw_phy_stat_rssi_iter, &data);
+
+ dm_info->pre_min_rssi = dm_info->min_rssi;
+ dm_info->min_rssi = data.min_rssi;
+}
+
+static void rtw_phy_statistics(struct rtw_dev *rtwdev)
+{
+ rtw_phy_stat_rssi(rtwdev);
+ rtw_phy_stat_false_alarm(rtwdev);
+}
+
+#define DIG_PERF_FA_TH_LOW 250
+#define DIG_PERF_FA_TH_HIGH 500
+#define DIG_PERF_FA_TH_EXTRA_HIGH 750
+#define DIG_PERF_MAX 0x5a
+#define DIG_PERF_MID 0x40
+#define DIG_CVRG_FA_TH_LOW 2000
+#define DIG_CVRG_FA_TH_HIGH 4000
+#define DIG_CVRG_FA_TH_EXTRA_HIGH 5000
+#define DIG_CVRG_MAX 0x2a
+#define DIG_CVRG_MID 0x26
+#define DIG_CVRG_MIN 0x1c
+#define DIG_RSSI_GAIN_OFFSET 15
+
+static bool
+rtw_phy_dig_check_damping(struct rtw_dm_info *dm_info)
+{
+ u16 fa_lo = DIG_PERF_FA_TH_LOW;
+ u16 fa_hi = DIG_PERF_FA_TH_HIGH;
+ u16 *fa_history;
+ u8 *igi_history;
+ u8 damping_rssi;
+ u8 min_rssi;
+ u8 diff;
+ u8 igi_bitmap;
+ bool damping = false;
+
+ min_rssi = dm_info->min_rssi;
+ if (dm_info->damping) {
+ damping_rssi = dm_info->damping_rssi;
+ diff = min_rssi > damping_rssi ? min_rssi - damping_rssi :
+ damping_rssi - min_rssi;
+ if (diff > 3 || dm_info->damping_cnt++ > 20) {
+ dm_info->damping = false;
+ return false;
+ }
+
+ return true;
+ }
+
+ igi_history = dm_info->igi_history;
+ fa_history = dm_info->fa_history;
+ igi_bitmap = dm_info->igi_bitmap & 0xf;
+ switch (igi_bitmap) {
+ case 5:
+ /* down -> up -> down -> up */
+ if (igi_history[0] > igi_history[1] &&
+ igi_history[2] > igi_history[3] &&
+ igi_history[0] - igi_history[1] >= 2 &&
+ igi_history[2] - igi_history[3] >= 2 &&
+ fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
+ fa_history[2] > fa_hi && fa_history[3] < fa_lo)
+ damping = true;
+ break;
+ case 9:
+ /* up -> down -> down -> up */
+ if (igi_history[0] > igi_history[1] &&
+ igi_history[3] > igi_history[2] &&
+ igi_history[0] - igi_history[1] >= 4 &&
+ igi_history[3] - igi_history[2] >= 2 &&
+ fa_history[0] > fa_hi && fa_history[1] < fa_lo &&
+ fa_history[2] < fa_lo && fa_history[3] > fa_hi)
+ damping = true;
+ break;
+ default:
+ return false;
+ }
+
+ if (damping) {
+ dm_info->damping = true;
+ dm_info->damping_cnt = 0;
+ dm_info->damping_rssi = min_rssi;
+ }
+
+ return damping;
+}
+
+static void rtw_phy_dig_get_boundary(struct rtw_dm_info *dm_info,
+ u8 *upper, u8 *lower, bool linked)
+{
+ u8 dig_max, dig_min, dig_mid;
+ u8 min_rssi;
+
+ if (linked) {
+ dig_max = DIG_PERF_MAX;
+ dig_mid = DIG_PERF_MID;
+ /* 22B=0x1c, 22C=0x20 */
+ dig_min = 0x1c;
+ min_rssi = max_t(u8, dm_info->min_rssi, dig_min);
+ } else {
+ dig_max = DIG_CVRG_MAX;
+ dig_mid = DIG_CVRG_MID;
+ dig_min = DIG_CVRG_MIN;
+ min_rssi = dig_min;
+ }
+
+ /* DIG MAX should be bounded by minimum RSSI with offset +15 */
+ dig_max = min_t(u8, dig_max, min_rssi + DIG_RSSI_GAIN_OFFSET);
+
+ *lower = clamp_t(u8, min_rssi, dig_min, dig_mid);
+ *upper = clamp_t(u8, *lower + DIG_RSSI_GAIN_OFFSET, dig_min, dig_max);
+}
+
+static void rtw_phy_dig_get_threshold(struct rtw_dm_info *dm_info,
+ u16 *fa_th, u8 *step, bool linked)
+{
+ u8 min_rssi, pre_min_rssi;
+
+ min_rssi = dm_info->min_rssi;
+ pre_min_rssi = dm_info->pre_min_rssi;
+ step[0] = 4;
+ step[1] = 3;
+ step[2] = 2;
+
+ if (linked) {
+ fa_th[0] = DIG_PERF_FA_TH_EXTRA_HIGH;
+ fa_th[1] = DIG_PERF_FA_TH_HIGH;
+ fa_th[2] = DIG_PERF_FA_TH_LOW;
+ if (pre_min_rssi > min_rssi) {
+ step[0] = 6;
+ step[1] = 4;
+ step[2] = 2;
+ }
+ } else {
+ fa_th[0] = DIG_CVRG_FA_TH_EXTRA_HIGH;
+ fa_th[1] = DIG_CVRG_FA_TH_HIGH;
+ fa_th[2] = DIG_CVRG_FA_TH_LOW;
+ }
+}
+
+static void rtw_phy_dig_recorder(struct rtw_dm_info *dm_info, u8 igi, u16 fa)
+{
+ u8 *igi_history;
+ u16 *fa_history;
+ u8 igi_bitmap;
+ bool up;
+
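+	/* shift the history left; bit 0 records whether the igi went up this round */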
+ igi_bitmap = dm_info->igi_bitmap << 1 & 0xfe;
+ igi_history = dm_info->igi_history;
+ fa_history = dm_info->fa_history;
+
+ up = igi > igi_history[0];
+ igi_bitmap |= up;
+
+ igi_history[3] = igi_history[2];
+ igi_history[2] = igi_history[1];
+ igi_history[1] = igi_history[0];
+ igi_history[0] = igi;
+
+ fa_history[3] = fa_history[2];
+ fa_history[2] = fa_history[1];
+ fa_history[1] = fa_history[0];
+ fa_history[0] = fa;
+
+ dm_info->igi_bitmap = igi_bitmap;
+}
+
+static void rtw_phy_dig(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 upper_bound, lower_bound;
+ u8 pre_igi, cur_igi;
+ u16 fa_th[3], fa_cnt;
+ u8 level;
+ u8 step[3];
+ bool linked;
+
+ if (rtw_flag_check(rtwdev, RTW_FLAG_DIG_DISABLE))
+ return;
+
+ if (rtw_phy_dig_check_damping(dm_info))
+ return;
+
+ linked = !!rtwdev->sta_cnt;
+
+ fa_cnt = dm_info->total_fa_cnt;
+ pre_igi = dm_info->igi_history[0];
+
+ rtw_phy_dig_get_threshold(dm_info, fa_th, step, linked);
+
+	/* test the false alarm count against the highest threshold level first,
+	 * and increase the igi by the corresponding step size
+	 *
+	 * note that the step size is offset by -2, which is compensated afterwards
+ */
+ cur_igi = pre_igi;
+ for (level = 0; level < 3; level++) {
+ if (fa_cnt > fa_th[level]) {
+ cur_igi += step[level];
+ break;
+ }
+ }
+ cur_igi -= 2;
+
+	/* calculate the upper/lower bound from the minimum rssi among the
+	 * peers connected to us, and make sure the igi value does not go
+	 * beyond the hardware limitation
+ */
+ rtw_phy_dig_get_boundary(dm_info, &upper_bound, &lower_bound, linked);
+ cur_igi = clamp_t(u8, cur_igi, lower_bound, upper_bound);
+
+ /* record current igi value and false alarm statistics for further
+ * damping checks, and record the trend of igi values
+ */
+ rtw_phy_dig_recorder(dm_info, cur_igi, fa_cnt);
+
+ if (cur_igi != pre_igi)
+ rtw_phy_dig_write(rtwdev, cur_igi);
+}
+
+static void rtw_phy_ra_info_update_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+
+ rtw_update_sta_info(rtwdev, si);
+}
+
+static void rtw_phy_ra_info_update(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->watch_dog_cnt & 0x3)
+ return;
+
+ rtw_iterate_stas_atomic(rtwdev, rtw_phy_ra_info_update_iter, rtwdev);
+}
+
+void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
+{
+	/* gather statistics used by the mechanisms below */
+ rtw_phy_statistics(rtwdev);
+ rtw_phy_dig(rtwdev);
+ rtw_phy_ra_info_update(rtwdev);
+}
+
+#define FRAC_BITS 3
+
+static u8 rtw_phy_power_2_db(s8 power)
+{
+ if (power <= -100 || power >= 20)
+ return 0;
+ else if (power >= 0)
+ return 100;
+ else
+ return 100 + power;
+}
+
+static u64 rtw_phy_db_2_linear(u8 power_db)
+{
+ u8 i, j;
+ u64 linear;
+
+ /* 1dB ~ 96dB */
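+	/* the table holds one entry per dB, organized as 12 rows of 8 entries */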
+ i = (power_db - 1) >> 3;
+ j = (power_db - 1) - (i << 3);
+
+ linear = db_invert_table[i][j];
+ linear = i > 2 ? linear << FRAC_BITS : linear;
+
+ return linear;
+}
+
+static u8 rtw_phy_linear_2_db(u64 linear)
+{
+ u8 i;
+ u8 j;
+ u32 dB;
+
+ if (linear >= db_invert_table[11][7])
+ return 96; /* maximum 96 dB */
+
+ for (i = 0; i < 12; i++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][7])
+ break;
+ else if (i > 2 && linear <= db_invert_table[i][7])
+ break;
+ }
+
+ for (j = 0; j < 8; j++) {
+ if (i <= 2 && (linear << FRAC_BITS) <= db_invert_table[i][j])
+ break;
+ else if (i > 2 && linear <= db_invert_table[i][j])
+ break;
+ }
+
+ if (j == 0 && i == 0)
+ goto end;
+
+ if (j == 0) {
+ if (i != 3) {
+ if (db_invert_table[i][0] - linear >
+ linear - db_invert_table[i - 1][7]) {
+ i = i - 1;
+ j = 7;
+ }
+ } else {
+ if (db_invert_table[3][0] - linear >
+ linear - db_invert_table[2][7]) {
+ i = 2;
+ j = 7;
+ }
+ }
+ } else {
+ if (db_invert_table[i][j] - linear >
+ linear - db_invert_table[i][j - 1]) {
+ j = j - 1;
+ }
+ }
+end:
+ dB = (i << 3) + j + 1;
+
+ return dB;
+}
+
+u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num)
+{
+ s8 power;
+ u8 power_db;
+ u64 linear;
+ u64 sum = 0;
+ u8 path;
+
+ for (path = 0; path < path_num; path++) {
+ power = rf_power[path];
+ power_db = rtw_phy_power_2_db(power);
+ linear = rtw_phy_db_2_linear(power_db);
+ sum += linear;
+ }
+
+ sum = (sum + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
+ switch (path_num) {
+ case 2:
+ sum >>= 1;
+ break;
+ case 3:
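+		/* approximate sum / 3 as sum * 11 / 32 */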
+ sum = ((sum) + ((sum) << 1) + ((sum) << 3)) >> 5;
+ break;
+ case 4:
+ sum >>= 2;
+ break;
+ default:
+ break;
+ }
+
+ return rtw_phy_linear_2_db(sum);
+}
+
+u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *base_addr = chip->rf_base_addr;
+ u32 val, direct_addr;
+
+ if (rf_path >= hal->rf_path_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ addr &= 0xff;
+ direct_addr = base_addr[rf_path] + (addr << 2);
+ mask &= RFREG_MASK;
+
+ val = rtw_read32_mask(rtwdev, direct_addr, mask);
+
+ return val;
+}
+
+bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 *sipi_addr = chip->rf_sipi_addr;
+ u32 data_and_addr;
+ u32 old_data = 0;
+ u32 shift;
+
+ if (rf_path >= hal->rf_path_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ addr &= 0xff;
+ mask &= RFREG_MASK;
+
+ if (mask != RFREG_MASK) {
+ old_data = rtw_phy_read_rf(rtwdev, rf_path, addr, RFREG_MASK);
+
+ if (old_data == INV_RF_DATA) {
+ rtw_err(rtwdev, "Write fail, rf is disabled\n");
+ return false;
+ }
+
+ shift = __ffs(mask);
+ data = ((old_data) & (~mask)) | (data << shift);
+ }
+
+ data_and_addr = ((addr << 20) | (data & 0x000fffff)) & 0x0fffffff;
+
+ rtw_write32(rtwdev, sipi_addr[rf_path], data_and_addr);
+
+ udelay(13);
+
+ return true;
+}
+
+bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_chip_info *chip = rtwdev->chip;
+ const u32 *base_addr = chip->rf_base_addr;
+ u32 direct_addr;
+
+ if (rf_path >= hal->rf_path_num) {
+ rtw_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ addr &= 0xff;
+ direct_addr = base_addr[rf_path] + (addr << 2);
+ mask &= RFREG_MASK;
+
+ rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, DISABLE_PI);
+ rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, DISABLE_PI);
+ rtw_write32_mask(rtwdev, direct_addr, mask, data);
+
+ udelay(1);
+
+ rtw_write32_mask(rtwdev, REG_RSV_CTRL, BITS_RFC_DIRECT, ENABLE_PI);
+ rtw_write32_mask(rtwdev, REG_WLRF1, BITS_RFC_DIRECT, ENABLE_PI);
+
+ return true;
+}
+
+bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ if (addr != 0x00)
+ return rtw_phy_write_rf_reg(rtwdev, rf_path, addr, mask, data);
+
+ return rtw_phy_write_rf_reg_sipi(rtwdev, rf_path, addr, mask, data);
+}
+
+void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_phy_cond cond = {0};
+
+ cond.cut = hal->cut_version ? hal->cut_version : 15;
+ cond.pkg = pkg ? pkg : 15;
+ cond.plat = 0x04;
+ cond.rfe = efuse->rfe_option;
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_USB:
+ cond.intf = INTF_USB;
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ cond.intf = INTF_SDIO;
+ break;
+ case RTW_HCI_TYPE_PCIE:
+ default:
+ cond.intf = INTF_PCIE;
+ break;
+ }
+
+ hal->phy_cond = cond;
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "phy cond=0x%08x\n", *((u32 *)&hal->phy_cond));
+}
+
+static bool check_positive(struct rtw_dev *rtwdev, struct rtw_phy_cond cond)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_phy_cond drv_cond = hal->phy_cond;
+
+ if (cond.cut && cond.cut != drv_cond.cut)
+ return false;
+
+ if (cond.pkg && cond.pkg != drv_cond.pkg)
+ return false;
+
+ if (cond.intf && cond.intf != drv_cond.intf)
+ return false;
+
+ if (cond.rfe != drv_cond.rfe)
+ return false;
+
+ return true;
+}
+
+void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
+{
+ const union phy_table_tile *p = tbl->data;
+ const union phy_table_tile *end = p + tbl->size / 2;
+ struct rtw_phy_cond pos_cond = {0};
+ bool is_matched = true, is_skipped = false;
+
+ BUILD_BUG_ON(sizeof(union phy_table_tile) != sizeof(struct phy_cfg_pair));
+
+ for (; p < end; p++) {
+ if (p->cond.pos) {
+ switch (p->cond.branch) {
+ case BRANCH_ENDIF:
+ is_matched = true;
+ is_skipped = false;
+ break;
+ case BRANCH_ELSE:
+ is_matched = is_skipped ? false : true;
+ break;
+ case BRANCH_IF:
+ case BRANCH_ELIF:
+ default:
+ pos_cond = p->cond;
+ break;
+ }
+ } else if (p->cond.neg) {
+ if (!is_skipped) {
+ if (check_positive(rtwdev, pos_cond)) {
+ is_matched = true;
+ is_skipped = true;
+ } else {
+ is_matched = false;
+ is_skipped = false;
+ }
+ } else {
+ is_matched = false;
+ }
+ } else if (is_matched) {
+ (*tbl->do_cfg)(rtwdev, tbl, p->cfg.addr, p->cfg.data);
+ }
+ }
+}
+
+void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl)
+{
+ const struct phy_pg_cfg_pair *p = tbl->data;
+ const struct phy_pg_cfg_pair *end = p + tbl->size / 6;
+
+ BUILD_BUG_ON(sizeof(struct phy_pg_cfg_pair) != sizeof(u32) * 6);
+
+ for (; p < end; p++) {
+ if (p->addr == 0xfe || p->addr == 0xffe) {
+ msleep(50);
+ continue;
+ }
+ phy_store_tx_power_by_rate(rtwdev, p->band, p->rf_path,
+ p->tx_num, p->addr, p->bitmask,
+ p->data);
+ }
+}
+
+void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
+ const struct rtw_table *tbl)
+{
+ const struct txpwr_lmt_cfg_pair *p = tbl->data;
+ const struct txpwr_lmt_cfg_pair *end = p + tbl->size / 6;
+
+ BUILD_BUG_ON(sizeof(struct txpwr_lmt_cfg_pair) != sizeof(u8) * 6);
+
+ for (; p < end; p++) {
+ phy_set_tx_power_limit(rtwdev, p->regd, p->band,
+ p->bw, p->rs,
+ p->ch, p->txpwr_lmt);
+ }
+}
+
+void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ rtw_write8(rtwdev, addr, data);
+}
+
+void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ rtw_write32(rtwdev, addr, data);
+}
+
+void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ if (addr == 0xfe)
+ msleep(50);
+ else if (addr == 0xfd)
+ mdelay(5);
+ else if (addr == 0xfc)
+ mdelay(1);
+ else if (addr == 0xfb)
+ usleep_range(50, 60);
+ else if (addr == 0xfa)
+ udelay(5);
+ else if (addr == 0xf9)
+ udelay(1);
+ else
+ rtw_write32(rtwdev, addr, data);
+}
+
+void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data)
+{
+ if (addr == 0xffe) {
+ msleep(50);
+ } else if (addr == 0xfe) {
+ usleep_range(100, 110);
+ } else {
+ rtw_write_rf(rtwdev, tbl->rf_path, addr, RFREG_MASK, data);
+ udelay(1);
+ }
+}
+
+static void rtw_load_rfk_table(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ if (!chip->rfk_init_tbl)
+ return;
+
+ rtw_load_table(rtwdev, chip->rfk_init_tbl);
+}
+
+void rtw_phy_load_tables(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 rf_path;
+
+ rtw_load_table(rtwdev, chip->mac_tbl);
+ rtw_load_table(rtwdev, chip->bb_tbl);
+ rtw_load_table(rtwdev, chip->agc_tbl);
+ rtw_load_rfk_table(rtwdev);
+
+ for (rf_path = 0; rf_path < rtwdev->hal.rf_path_num; rf_path++) {
+ const struct rtw_table *tbl;
+
+ tbl = chip->rf_tbl[rf_path];
+ rtw_load_table(rtwdev, tbl);
+ }
+}
+
+#define bcd_to_dec_pwr_by_rate(val, i) bcd2bin(val >> (i * 8))
+
+#define RTW_MAX_POWER_INDEX 0x3F
+
+u8 rtw_cck_rates[] = { DESC_RATE1M, DESC_RATE2M, DESC_RATE5_5M, DESC_RATE11M };
+u8 rtw_ofdm_rates[] = {
+ DESC_RATE6M, DESC_RATE9M, DESC_RATE12M,
+ DESC_RATE18M, DESC_RATE24M, DESC_RATE36M,
+ DESC_RATE48M, DESC_RATE54M
+};
+u8 rtw_ht_1s_rates[] = {
+ DESC_RATEMCS0, DESC_RATEMCS1, DESC_RATEMCS2,
+ DESC_RATEMCS3, DESC_RATEMCS4, DESC_RATEMCS5,
+ DESC_RATEMCS6, DESC_RATEMCS7
+};
+u8 rtw_ht_2s_rates[] = {
+ DESC_RATEMCS8, DESC_RATEMCS9, DESC_RATEMCS10,
+ DESC_RATEMCS11, DESC_RATEMCS12, DESC_RATEMCS13,
+ DESC_RATEMCS14, DESC_RATEMCS15
+};
+u8 rtw_vht_1s_rates[] = {
+ DESC_RATEVHT1SS_MCS0, DESC_RATEVHT1SS_MCS1,
+ DESC_RATEVHT1SS_MCS2, DESC_RATEVHT1SS_MCS3,
+ DESC_RATEVHT1SS_MCS4, DESC_RATEVHT1SS_MCS5,
+ DESC_RATEVHT1SS_MCS6, DESC_RATEVHT1SS_MCS7,
+ DESC_RATEVHT1SS_MCS8, DESC_RATEVHT1SS_MCS9
+};
+u8 rtw_vht_2s_rates[] = {
+ DESC_RATEVHT2SS_MCS0, DESC_RATEVHT2SS_MCS1,
+ DESC_RATEVHT2SS_MCS2, DESC_RATEVHT2SS_MCS3,
+ DESC_RATEVHT2SS_MCS4, DESC_RATEVHT2SS_MCS5,
+ DESC_RATEVHT2SS_MCS6, DESC_RATEVHT2SS_MCS7,
+ DESC_RATEVHT2SS_MCS8, DESC_RATEVHT2SS_MCS9
+};
+u8 rtw_cck_size = ARRAY_SIZE(rtw_cck_rates);
+u8 rtw_ofdm_size = ARRAY_SIZE(rtw_ofdm_rates);
+u8 rtw_ht_1s_size = ARRAY_SIZE(rtw_ht_1s_rates);
+u8 rtw_ht_2s_size = ARRAY_SIZE(rtw_ht_2s_rates);
+u8 rtw_vht_1s_size = ARRAY_SIZE(rtw_vht_1s_rates);
+u8 rtw_vht_2s_size = ARRAY_SIZE(rtw_vht_2s_rates);
+u8 *rtw_rate_section[RTW_RATE_SECTION_MAX] = {
+ rtw_cck_rates, rtw_ofdm_rates,
+ rtw_ht_1s_rates, rtw_ht_2s_rates,
+ rtw_vht_1s_rates, rtw_vht_2s_rates
+};
+u8 rtw_rate_size[RTW_RATE_SECTION_MAX] = {
+ ARRAY_SIZE(rtw_cck_rates),
+ ARRAY_SIZE(rtw_ofdm_rates),
+ ARRAY_SIZE(rtw_ht_1s_rates),
+ ARRAY_SIZE(rtw_ht_2s_rates),
+ ARRAY_SIZE(rtw_vht_1s_rates),
+ ARRAY_SIZE(rtw_vht_2s_rates)
+};
+
+static const u8 rtw_channel_idx_5g[RTW_MAX_CHANNEL_NUM_5G] = {
+ 36, 38, 40, 42, 44, 46, 48, /* Band 1 */
+ 52, 54, 56, 58, 60, 62, 64, /* Band 2 */
+ 100, 102, 104, 106, 108, 110, 112, /* Band 3 */
+ 116, 118, 120, 122, 124, 126, 128, /* Band 3 */
+ 132, 134, 136, 138, 140, 142, 144, /* Band 3 */
+ 149, 151, 153, 155, 157, 159, 161, /* Band 4 */
+ 165, 167, 169, 171, 173, 175, 177}; /* Band 4 */
+
+static int rtw_channel_to_idx(u8 band, u8 channel)
+{
+ int ch_idx;
+ u8 n_channel;
+
+ if (band == PHY_BAND_2G) {
+ ch_idx = channel - 1;
+ n_channel = RTW_MAX_CHANNEL_NUM_2G;
+ } else if (band == PHY_BAND_5G) {
+ n_channel = RTW_MAX_CHANNEL_NUM_5G;
+ for (ch_idx = 0; ch_idx < n_channel; ch_idx++)
+ if (rtw_channel_idx_5g[ch_idx] == channel)
+ break;
+ } else {
+ return -1;
+ }
+
+ if (ch_idx >= n_channel)
+ return -1;
+
+ return ch_idx;
+}
+
+static u8 rtw_get_channel_group(u8 channel)
+{
+ switch (channel) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case 1:
+ case 2:
+ case 36:
+ case 38:
+ case 40:
+ case 42:
+ return 0;
+ case 3:
+ case 4:
+ case 5:
+ case 44:
+ case 46:
+ case 48:
+ case 50:
+ return 1;
+ case 6:
+ case 7:
+ case 8:
+ case 52:
+ case 54:
+ case 56:
+ case 58:
+ return 2;
+ case 9:
+ case 10:
+ case 11:
+ case 60:
+ case 62:
+ case 64:
+ return 3;
+ case 12:
+ case 13:
+ case 100:
+ case 102:
+ case 104:
+ case 106:
+ return 4;
+ case 14:
+ case 108:
+ case 110:
+ case 112:
+ case 114:
+ return 5;
+ case 116:
+ case 118:
+ case 120:
+ case 122:
+ return 6;
+ case 124:
+ case 126:
+ case 128:
+ case 130:
+ return 7;
+ case 132:
+ case 134:
+ case 136:
+ case 138:
+ return 8;
+ case 140:
+ case 142:
+ case 144:
+ return 9;
+ case 149:
+ case 151:
+ case 153:
+ case 155:
+ return 10;
+ case 157:
+ case 159:
+ case 161:
+ return 11;
+ case 165:
+ case 167:
+ case 169:
+ case 171:
+ return 12;
+ case 173:
+ case 175:
+ case 177:
+ return 13;
+ }
+}
+
+static u8 phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
+ struct rtw_2g_txpwr_idx *pwr_idx_2g,
+ enum rtw_bandwidth bandwidth,
+ u8 rate, u8 group)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 tx_power;
+ bool mcs_rate;
+ bool above_2ss;
+ u8 factor = chip->txgi_factor;
+
+ if (rate <= DESC_RATE11M)
+ tx_power = pwr_idx_2g->cck_base[group];
+ else
+ tx_power = pwr_idx_2g->bw40_base[group];
+
+ if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
+ tx_power += pwr_idx_2g->ht_1s_diff.ofdm * factor;
+
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT2SS_MCS0);
+
+ if (!mcs_rate)
+ return tx_power;
+
+ switch (bandwidth) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case RTW_CHANNEL_WIDTH_20:
+ tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
+ if (above_2ss)
+ tx_power += pwr_idx_2g->ht_2s_diff.bw20 * factor;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ /* bw40 is the base power */
+ if (above_2ss)
+ tx_power += pwr_idx_2g->ht_2s_diff.bw40 * factor;
+ break;
+ }
+
+ return tx_power;
+}
+
+static u8 phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
+ struct rtw_5g_txpwr_idx *pwr_idx_5g,
+ enum rtw_bandwidth bandwidth,
+ u8 rate, u8 group)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u8 tx_power;
+ u8 upper, lower;
+ bool mcs_rate;
+ bool above_2ss;
+ u8 factor = chip->txgi_factor;
+
+ tx_power = pwr_idx_5g->bw40_base[group];
+
+ mcs_rate = (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT1SS_MCS0 &&
+ rate <= DESC_RATEVHT2SS_MCS9);
+ above_2ss = (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15) ||
+ (rate >= DESC_RATEVHT2SS_MCS0);
+
+ if (!mcs_rate) {
+ tx_power += pwr_idx_5g->ht_1s_diff.ofdm * factor;
+ return tx_power;
+ }
+
+ switch (bandwidth) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case RTW_CHANNEL_WIDTH_20:
+ tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
+ if (above_2ss)
+ tx_power += pwr_idx_5g->ht_2s_diff.bw20 * factor;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ /* bw40 is the base power */
+ if (above_2ss)
+ tx_power += pwr_idx_5g->ht_2s_diff.bw40 * factor;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ /* the base idx of bw80 is the average of bw40+/bw40- */
+ lower = pwr_idx_5g->bw40_base[group];
+ upper = pwr_idx_5g->bw40_base[group + 1];
+
+ tx_power = (lower + upper) / 2;
+ tx_power += pwr_idx_5g->vht_1s_diff.bw80 * factor;
+ if (above_2ss)
+ tx_power += pwr_idx_5g->vht_2s_diff.bw80 * factor;
+ break;
+ }
+
+ return tx_power;
+}
+
+/* set tx power level by path for each rate; note that the order of the rates
+ * is *very* important, because 8822B/8821C combines every four bytes of tx
+ * power index into a four-byte power index register, and calls set_tx_agc to
+ * write these values into hardware
+ */
+static
+void phy_set_tx_power_level_by_path(struct rtw_dev *rtwdev, u8 ch, u8 path)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rs;
+
+ /* do not need cck rates if we are not in 2.4G */
+ if (hal->current_band_type == RTW_BAND_2G)
+ rs = RTW_RATE_SECTION_CCK;
+ else
+ rs = RTW_RATE_SECTION_OFDM;
+
+ for (; rs < RTW_RATE_SECTION_MAX; rs++)
+ phy_set_tx_power_index_by_rs(rtwdev, ch, path, rs);
+}
+
+void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 path;
+
+ mutex_lock(&hal->tx_power_mutex);
+
+ for (path = 0; path < hal->rf_path_num; path++)
+ phy_set_tx_power_level_by_path(rtwdev, channel, path);
+
+ chip->ops->set_tx_power_index(rtwdev);
+ mutex_unlock(&hal->tx_power_mutex);
+}
+
+s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
+ enum rtw_bandwidth bandwidth, u8 rf_path,
+ u8 rate, u8 channel, u8 regd);
+
+static
+u8 phy_get_tx_power_index(void *adapter, u8 rf_path, u8 rate,
+ enum rtw_bandwidth bandwidth, u8 channel, u8 regd)
+{
+ struct rtw_dev *rtwdev = adapter;
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_txpwr_idx *pwr_idx;
+ u8 tx_power;
+ u8 group;
+ u8 band;
+ s8 offset, limit;
+
+ pwr_idx = &rtwdev->efuse.txpwr_idx_table[rf_path];
+ group = rtw_get_channel_group(channel);
+
+ /* base power index for 2.4G/5G */
+ if (channel <= 14) {
+ band = PHY_BAND_2G;
+ tx_power = phy_get_2g_tx_power_index(rtwdev,
+ &pwr_idx->pwr_idx_2g,
+ bandwidth, rate, group);
+ offset = hal->tx_pwr_by_rate_offset_2g[rf_path][rate];
+ } else {
+ band = PHY_BAND_5G;
+ tx_power = phy_get_5g_tx_power_index(rtwdev,
+ &pwr_idx->pwr_idx_5g,
+ bandwidth, rate, group);
+ offset = hal->tx_pwr_by_rate_offset_5g[rf_path][rate];
+ }
+
+ limit = phy_get_tx_power_limit(rtwdev, band, bandwidth, rf_path,
+ rate, channel, regd);
+
+ if (offset > limit)
+ offset = limit;
+
+ tx_power += offset;
+
+ if (tx_power > rtwdev->chip->max_power_index)
+ tx_power = rtwdev->chip->max_power_index;
+
+ return tx_power;
+}
+
+void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs)
+{
+ struct rtw_dev *rtwdev = adapter;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 regd = rtwdev->regd.txpwr_regd;
+ u8 *rates;
+ u8 size;
+ u8 rate;
+ u8 pwr_idx;
+ u8 bw;
+ int i;
+
+ if (rs >= RTW_RATE_SECTION_MAX)
+ return;
+
+ rates = rtw_rate_section[rs];
+ size = rtw_rate_size[rs];
+ bw = hal->current_band_width;
+ for (i = 0; i < size; i++) {
+ rate = rates[i];
+ pwr_idx = phy_get_tx_power_index(adapter, path, rate, bw, ch,
+ regd);
+ hal->tx_pwr_tbl[path][rate] = pwr_idx;
+ }
+}
+
+static u8 tbl_to_dec_pwr_by_rate(struct rtw_dev *rtwdev, u32 hex, u8 i)
+{
+ if (rtwdev->chip->is_pwr_by_rate_dec)
+ return bcd_to_dec_pwr_by_rate(hex, i);
+ else
+ return (hex >> (i * 8)) & 0xFF;
+}
+
+static void phy_get_rate_values_of_txpwr_by_rate(struct rtw_dev *rtwdev,
+ u32 addr, u32 mask,
+ u32 val, u8 *rate,
+ u8 *pwr_by_rate, u8 *rate_num)
+{
+ int i;
+
+ switch (addr) {
+ case 0xE00:
+ case 0x830:
+ rate[0] = DESC_RATE6M;
+ rate[1] = DESC_RATE9M;
+ rate[2] = DESC_RATE12M;
+ rate[3] = DESC_RATE18M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE04:
+ case 0x834:
+ rate[0] = DESC_RATE24M;
+ rate[1] = DESC_RATE36M;
+ rate[2] = DESC_RATE48M;
+ rate[3] = DESC_RATE54M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE08:
+ rate[0] = DESC_RATE1M;
+ pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 1);
+ *rate_num = 1;
+ break;
+ case 0x86C:
+ if (mask == 0xffffff00) {
+ rate[0] = DESC_RATE2M;
+ rate[1] = DESC_RATE5_5M;
+ rate[2] = DESC_RATE11M;
+ for (i = 1; i < 4; ++i)
+ pwr_by_rate[i - 1] =
+ tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 3;
+ } else if (mask == 0x000000ff) {
+ rate[0] = DESC_RATE11M;
+ pwr_by_rate[0] = bcd_to_dec_pwr_by_rate(val, 0);
+ *rate_num = 1;
+ }
+ break;
+ case 0xE10:
+ case 0x83C:
+ rate[0] = DESC_RATEMCS0;
+ rate[1] = DESC_RATEMCS1;
+ rate[2] = DESC_RATEMCS2;
+ rate[3] = DESC_RATEMCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE14:
+ case 0x848:
+ rate[0] = DESC_RATEMCS4;
+ rate[1] = DESC_RATEMCS5;
+ rate[2] = DESC_RATEMCS6;
+ rate[3] = DESC_RATEMCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE18:
+ case 0x84C:
+ rate[0] = DESC_RATEMCS8;
+ rate[1] = DESC_RATEMCS9;
+ rate[2] = DESC_RATEMCS10;
+ rate[3] = DESC_RATEMCS11;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xE1C:
+ case 0x868:
+ rate[0] = DESC_RATEMCS12;
+ rate[1] = DESC_RATEMCS13;
+ rate[2] = DESC_RATEMCS14;
+ rate[3] = DESC_RATEMCS15;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+
+ break;
+ case 0x838:
+ rate[0] = DESC_RATE1M;
+ rate[1] = DESC_RATE2M;
+ rate[2] = DESC_RATE5_5M;
+ for (i = 1; i < 4; ++i)
+ pwr_by_rate[i - 1] = tbl_to_dec_pwr_by_rate(rtwdev,
+ val, i);
+ *rate_num = 3;
+ break;
+ case 0xC20:
+ case 0xE20:
+ case 0x1820:
+ case 0x1A20:
+ rate[0] = DESC_RATE1M;
+ rate[1] = DESC_RATE2M;
+ rate[2] = DESC_RATE5_5M;
+ rate[3] = DESC_RATE11M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC24:
+ case 0xE24:
+ case 0x1824:
+ case 0x1A24:
+ rate[0] = DESC_RATE6M;
+ rate[1] = DESC_RATE9M;
+ rate[2] = DESC_RATE12M;
+ rate[3] = DESC_RATE18M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC28:
+ case 0xE28:
+ case 0x1828:
+ case 0x1A28:
+ rate[0] = DESC_RATE24M;
+ rate[1] = DESC_RATE36M;
+ rate[2] = DESC_RATE48M;
+ rate[3] = DESC_RATE54M;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC2C:
+ case 0xE2C:
+ case 0x182C:
+ case 0x1A2C:
+ rate[0] = DESC_RATEMCS0;
+ rate[1] = DESC_RATEMCS1;
+ rate[2] = DESC_RATEMCS2;
+ rate[3] = DESC_RATEMCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC30:
+ case 0xE30:
+ case 0x1830:
+ case 0x1A30:
+ rate[0] = DESC_RATEMCS4;
+ rate[1] = DESC_RATEMCS5;
+ rate[2] = DESC_RATEMCS6;
+ rate[3] = DESC_RATEMCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC34:
+ case 0xE34:
+ case 0x1834:
+ case 0x1A34:
+ rate[0] = DESC_RATEMCS8;
+ rate[1] = DESC_RATEMCS9;
+ rate[2] = DESC_RATEMCS10;
+ rate[3] = DESC_RATEMCS11;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC38:
+ case 0xE38:
+ case 0x1838:
+ case 0x1A38:
+ rate[0] = DESC_RATEMCS12;
+ rate[1] = DESC_RATEMCS13;
+ rate[2] = DESC_RATEMCS14;
+ rate[3] = DESC_RATEMCS15;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC3C:
+ case 0xE3C:
+ case 0x183C:
+ case 0x1A3C:
+ rate[0] = DESC_RATEVHT1SS_MCS0;
+ rate[1] = DESC_RATEVHT1SS_MCS1;
+ rate[2] = DESC_RATEVHT1SS_MCS2;
+ rate[3] = DESC_RATEVHT1SS_MCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC40:
+ case 0xE40:
+ case 0x1840:
+ case 0x1A40:
+ rate[0] = DESC_RATEVHT1SS_MCS4;
+ rate[1] = DESC_RATEVHT1SS_MCS5;
+ rate[2] = DESC_RATEVHT1SS_MCS6;
+ rate[3] = DESC_RATEVHT1SS_MCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC44:
+ case 0xE44:
+ case 0x1844:
+ case 0x1A44:
+ rate[0] = DESC_RATEVHT1SS_MCS8;
+ rate[1] = DESC_RATEVHT1SS_MCS9;
+ rate[2] = DESC_RATEVHT2SS_MCS0;
+ rate[3] = DESC_RATEVHT2SS_MCS1;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC48:
+ case 0xE48:
+ case 0x1848:
+ case 0x1A48:
+ rate[0] = DESC_RATEVHT2SS_MCS2;
+ rate[1] = DESC_RATEVHT2SS_MCS3;
+ rate[2] = DESC_RATEVHT2SS_MCS4;
+ rate[3] = DESC_RATEVHT2SS_MCS5;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xC4C:
+ case 0xE4C:
+ case 0x184C:
+ case 0x1A4C:
+ rate[0] = DESC_RATEVHT2SS_MCS6;
+ rate[1] = DESC_RATEVHT2SS_MCS7;
+ rate[2] = DESC_RATEVHT2SS_MCS8;
+ rate[3] = DESC_RATEVHT2SS_MCS9;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCD8:
+ case 0xED8:
+ case 0x18D8:
+ case 0x1AD8:
+ rate[0] = DESC_RATEMCS16;
+ rate[1] = DESC_RATEMCS17;
+ rate[2] = DESC_RATEMCS18;
+ rate[3] = DESC_RATEMCS19;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCDC:
+ case 0xEDC:
+ case 0x18DC:
+ case 0x1ADC:
+ rate[0] = DESC_RATEMCS20;
+ rate[1] = DESC_RATEMCS21;
+ rate[2] = DESC_RATEMCS22;
+ rate[3] = DESC_RATEMCS23;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE0:
+ case 0xEE0:
+ case 0x18E0:
+ case 0x1AE0:
+ rate[0] = DESC_RATEVHT3SS_MCS0;
+ rate[1] = DESC_RATEVHT3SS_MCS1;
+ rate[2] = DESC_RATEVHT3SS_MCS2;
+ rate[3] = DESC_RATEVHT3SS_MCS3;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE4:
+ case 0xEE4:
+ case 0x18E4:
+ case 0x1AE4:
+ rate[0] = DESC_RATEVHT3SS_MCS4;
+ rate[1] = DESC_RATEVHT3SS_MCS5;
+ rate[2] = DESC_RATEVHT3SS_MCS6;
+ rate[3] = DESC_RATEVHT3SS_MCS7;
+ for (i = 0; i < 4; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 4;
+ break;
+ case 0xCE8:
+ case 0xEE8:
+ case 0x18E8:
+ case 0x1AE8:
+ rate[0] = DESC_RATEVHT3SS_MCS8;
+ rate[1] = DESC_RATEVHT3SS_MCS9;
+ for (i = 0; i < 2; ++i)
+ pwr_by_rate[i] = tbl_to_dec_pwr_by_rate(rtwdev, val, i);
+ *rate_num = 2;
+ break;
+ default:
+ rtw_warn(rtwdev, "invalid tx power index addr 0x%08x\n", addr);
+ break;
+ }
+}
+
+void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtw_dev *rtwdev = adapter;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rate_num = 0;
+ u8 rate;
+ u8 rates[RTW_RF_PATH_MAX] = {0};
+ s8 offset;
+ s8 pwr_by_rate[RTW_RF_PATH_MAX] = {0};
+ int i;
+
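+	/* decode the BB register address and value into the descriptor
+	 * rates it covers and their power-by-rate offsets
+	 */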
+ phy_get_rate_values_of_txpwr_by_rate(rtwdev, regaddr, bitmask, data,
+ rates, pwr_by_rate, &rate_num);
+
+ if (WARN_ON(rfpath >= RTW_RF_PATH_MAX ||
+ (band != PHY_BAND_2G && band != PHY_BAND_5G) ||
+ rate_num > RTW_RF_PATH_MAX))
+ return;
+
+ for (i = 0; i < rate_num; i++) {
+ offset = pwr_by_rate[i];
+ rate = rates[i];
+ if (band == PHY_BAND_2G)
+ hal->tx_pwr_by_rate_offset_2g[rfpath][rate] = offset;
+ else if (band == PHY_BAND_5G)
+ hal->tx_pwr_by_rate_offset_5g[rfpath][rate] = offset;
+ else
+ continue;
+ }
+}
+
+static
+void phy_tx_power_by_rate_config_by_path(struct rtw_hal *hal, u8 path,
+ u8 rs, u8 size, u8 *rates)
+{
+ u8 rate;
+ u8 base_idx, rate_idx;
+ s8 base_2g, base_5g;
+
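+	/* VHT sections take their base from the third-from-last rate of
+	 * the section, the others from the last rate; every rate is then
+	 * stored as an offset from that base
+	 */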
+ if (rs >= RTW_RATE_SECTION_VHT_1S)
+ base_idx = rates[size - 3];
+ else
+ base_idx = rates[size - 1];
+ base_2g = hal->tx_pwr_by_rate_offset_2g[path][base_idx];
+ base_5g = hal->tx_pwr_by_rate_offset_5g[path][base_idx];
+ hal->tx_pwr_by_rate_base_2g[path][rs] = base_2g;
+ hal->tx_pwr_by_rate_base_5g[path][rs] = base_5g;
+ for (rate = 0; rate < size; rate++) {
+ rate_idx = rates[rate];
+ hal->tx_pwr_by_rate_offset_2g[path][rate_idx] -= base_2g;
+ hal->tx_pwr_by_rate_offset_5g[path][rate_idx] -= base_5g;
+ }
+}
+
+void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal)
+{
+ u8 path;
+
+ for (path = 0; path < RTW_RF_PATH_MAX; path++) {
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_CCK,
+ rtw_cck_size, rtw_cck_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_OFDM,
+ rtw_ofdm_size, rtw_ofdm_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_HT_1S,
+ rtw_ht_1s_size, rtw_ht_1s_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_HT_2S,
+ rtw_ht_2s_size, rtw_ht_2s_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_VHT_1S,
+ rtw_vht_1s_size, rtw_vht_1s_rates);
+ phy_tx_power_by_rate_config_by_path(hal, path,
+ RTW_RATE_SECTION_VHT_2S,
+ rtw_vht_2s_size, rtw_vht_2s_rates);
+ }
+}
+
+static void
+phy_tx_power_limit_config(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
+{
+	s8 base;
+ u8 ch;
+
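+	/* make the limits relative to the path-A by-rate base values */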
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++) {
+ base = hal->tx_pwr_by_rate_base_2g[0][rs];
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] -= base;
+ }
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++) {
+ base = hal->tx_pwr_by_rate_base_5g[0][rs];
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] -= base;
+ }
+}
+
+void rtw_phy_tx_power_limit_config(struct rtw_hal *hal)
+{
+ u8 regd, bw, rs;
+
+ for (regd = 0; regd < RTW_REGD_MAX; regd++)
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ phy_tx_power_limit_config(hal, regd, bw, rs);
+}
+
+static s8 get_tx_power_limit(struct rtw_hal *hal, u8 bw, u8 rs, u8 ch, u8 regd)
+{
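+	/* no limit table is kept past the worldwide domain; report the
+	 * maximum index so that no limit is applied
+	 */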
+ if (regd > RTW_REGD_WW)
+ return RTW_MAX_POWER_INDEX;
+
+ return hal->tx_pwr_limit_2g[regd][bw][rs][ch];
+}
+
+s8 phy_get_tx_power_limit(struct rtw_dev *rtwdev, u8 band,
+ enum rtw_bandwidth bw, u8 rf_path,
+ u8 rate, u8 channel, u8 regd)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ s8 power_limit;
+ u8 rs;
+ int ch_idx;
+
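+	/* map the descriptor rate to its rate section (CCK/OFDM/HT/VHT) */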
+ if (rate >= DESC_RATE1M && rate <= DESC_RATE11M)
+ rs = RTW_RATE_SECTION_CCK;
+ else if (rate >= DESC_RATE6M && rate <= DESC_RATE54M)
+ rs = RTW_RATE_SECTION_OFDM;
+ else if (rate >= DESC_RATEMCS0 && rate <= DESC_RATEMCS7)
+ rs = RTW_RATE_SECTION_HT_1S;
+ else if (rate >= DESC_RATEMCS8 && rate <= DESC_RATEMCS15)
+ rs = RTW_RATE_SECTION_HT_2S;
+ else if (rate >= DESC_RATEVHT1SS_MCS0 && rate <= DESC_RATEVHT1SS_MCS9)
+ rs = RTW_RATE_SECTION_VHT_1S;
+ else if (rate >= DESC_RATEVHT2SS_MCS0 && rate <= DESC_RATEVHT2SS_MCS9)
+ rs = RTW_RATE_SECTION_VHT_2S;
+ else
+ goto err;
+
+ ch_idx = rtw_channel_to_idx(band, channel);
+ if (ch_idx < 0)
+ goto err;
+
+ power_limit = get_tx_power_limit(hal, bw, rs, ch_idx, regd);
+
+ return power_limit;
+
+err:
+ WARN(1, "invalid arguments, band=%d, bw=%d, path=%d, rate=%d, ch=%d\n",
+ band, bw, rf_path, rate, channel);
+ return RTW_MAX_POWER_INDEX;
+}
+
+void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
+ u8 bw, u8 rs, u8 ch, s8 pwr_limit)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int ch_idx;
+
+ pwr_limit = clamp_t(s8, pwr_limit,
+ -RTW_MAX_POWER_INDEX, RTW_MAX_POWER_INDEX);
+ ch_idx = rtw_channel_to_idx(band, ch);
+
+ if (regd >= RTW_REGD_MAX || bw >= RTW_CHANNEL_WIDTH_MAX ||
+ rs >= RTW_RATE_SECTION_MAX || ch_idx < 0) {
+ WARN(1,
+		     "wrong txpwr_lmt regd=%u, band=%u bw=%u, rs=%u, ch_idx=%d, pwr_limit=%d\n",
+ regd, band, bw, rs, ch_idx, pwr_limit);
+ return;
+ }
+
+ if (band == PHY_BAND_2G)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch_idx] = pwr_limit;
+ else if (band == PHY_BAND_5G)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch_idx] = pwr_limit;
+}
+
+static
+void rtw_hw_tx_power_limit_init(struct rtw_hal *hal, u8 regd, u8 bw, u8 rs)
+{
+ u8 ch;
+
+ /* 2.4G channels */
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
+
+ /* 5G channels */
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] = RTW_MAX_POWER_INDEX;
+}
+
+void rtw_hw_init_tx_power(struct rtw_hal *hal)
+{
+ u8 regd, path, rate, rs, bw;
+
+ /* init tx power by rate offset */
+ for (path = 0; path < RTW_RF_PATH_MAX; path++) {
+ for (rate = 0; rate < DESC_RATE_MAX; rate++) {
+ hal->tx_pwr_by_rate_offset_2g[path][rate] = 0;
+ hal->tx_pwr_by_rate_offset_5g[path][rate] = 0;
+ }
+ }
+
+ /* init tx power limit */
+ for (regd = 0; regd < RTW_REGD_MAX; regd++)
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ rtw_hw_tx_power_limit_init(hal, regd, bw, rs);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
new file mode 100644
index 000000000000..ec03a2051e52
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_PHY_H_
+#define __RTW_PHY_H_
+
+#include "debug.h"
+
+extern u8 rtw_cck_rates[];
+extern u8 rtw_ofdm_rates[];
+extern u8 rtw_ht_1s_rates[];
+extern u8 rtw_ht_2s_rates[];
+extern u8 rtw_vht_1s_rates[];
+extern u8 rtw_vht_2s_rates[];
+extern u8 *rtw_rate_section[];
+extern u8 rtw_rate_size[];
+
+void rtw_phy_init(struct rtw_dev *rtwdev);
+void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev);
+u8 rtw_phy_rf_power_2_rssi(s8 *rf_power, u8 path_num);
+u32 rtw_phy_read_rf(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask);
+bool rtw_phy_write_rf_reg_sipi(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+bool rtw_phy_write_rf_reg(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+bool rtw_phy_write_rf_reg_mix(struct rtw_dev *rtwdev, enum rtw_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+void phy_store_tx_power_by_rate(void *adapter, u32 band, u32 rfpath, u32 txnum,
+ u32 regaddr, u32 bitmask, u32 data);
+void phy_set_tx_power_limit(struct rtw_dev *rtwdev, u8 regd, u8 band,
+ u8 bw, u8 rs, u8 ch, s8 pwr_limit);
+void phy_set_tx_power_index_by_rs(void *adapter, u8 ch, u8 path, u8 rs);
+void rtw_phy_setup_phy_cond(struct rtw_dev *rtwdev, u32 pkg);
+void rtw_parse_tbl_phy_cond(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+void rtw_parse_tbl_bb_pg(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev, const struct rtw_table *tbl);
+void rtw_phy_cfg_mac(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_phy_cfg_agc(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_phy_cfg_bb(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_phy_cfg_rf(struct rtw_dev *rtwdev, const struct rtw_table *tbl,
+ u32 addr, u32 data);
+void rtw_hw_init_tx_power(struct rtw_hal *hal);
+void rtw_phy_load_tables(struct rtw_dev *rtwdev);
+void rtw_phy_set_tx_power_level(struct rtw_dev *rtwdev, u8 channel);
+void rtw_phy_tx_power_by_rate_config(struct rtw_hal *hal);
+void rtw_phy_tx_power_limit_config(struct rtw_hal *hal);
+
+#define RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, path) \
+const struct rtw_table name ## _tbl = { \
+ .data = name, \
+ .size = ARRAY_SIZE(name), \
+ .parse = rtw_parse_tbl_phy_cond, \
+ .do_cfg = cfg, \
+ .rf_path = path, \
+}
+
+#define RTW_DECL_TABLE_PHY_COND(name, cfg) \
+ RTW_DECL_TABLE_PHY_COND_CORE(name, cfg, 0)
+
+#define RTW_DECL_TABLE_RF_RADIO(name, path) \
+ RTW_DECL_TABLE_PHY_COND_CORE(name, rtw_phy_cfg_rf, RF_PATH_ ## path)
+
+#define RTW_DECL_TABLE_BB_PG(name) \
+const struct rtw_table name ## _tbl = { \
+ .data = name, \
+ .size = ARRAY_SIZE(name), \
+ .parse = rtw_parse_tbl_bb_pg, \
+}
+
+#define RTW_DECL_TABLE_TXPWR_LMT(name) \
+const struct rtw_table name ## _tbl = { \
+ .data = name, \
+ .size = ARRAY_SIZE(name), \
+ .parse = rtw_parse_tbl_txpwr_lmt, \
+}
+
+static inline const struct rtw_rfe_def *rtw_get_rfe_def(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct rtw_rfe_def *rfe_def = NULL;
+
+ if (chip->rfe_defs_size == 0)
+ return NULL;
+
+ if (efuse->rfe_option < chip->rfe_defs_size)
+ rfe_def = &chip->rfe_defs[efuse->rfe_option];
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "use rfe_def[%d]\n", efuse->rfe_option);
+ return rfe_def;
+}
+
+static inline int rtw_check_supported_rfe(struct rtw_dev *rtwdev)
+{
+ const struct rtw_rfe_def *rfe_def = rtw_get_rfe_def(rtwdev);
+
+ if (!rfe_def || !rfe_def->phy_pg_tbl || !rfe_def->txpwr_lmt_tbl) {
+ rtw_err(rtwdev, "rfe %d isn't supported\n",
+ rtwdev->efuse.rfe_option);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi);
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define RFREG_MASK 0xfffff
+
+#define MASK7BITS 0x7f
+#define MASK12BITS 0xfff
+#define MASKH4BITS 0xf0000000
+#define MASK20BITS 0xfffff
+#define MASK24BITS 0xffffff
+
+#define MASKH3BYTES 0xffffff00
+#define MASKL3BYTES 0x00ffffff
+#define MASKBYTE2HIGHNIBBLE 0x00f00000
+#define MASKBYTE3LOWNIBBLE 0x0f000000
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
new file mode 100644
index 000000000000..607bfa4317d9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "ps.h"
+#include "mac.h"
+#include "debug.h"
+
+static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_core_start(rtwdev);
+ if (ret)
+ rtw_err(rtwdev, "leave idle state failed\n");
+
+ rtw_set_channel(rtwdev);
+ rtw_flag_clear(rtwdev, RTW_FLAG_INACTIVE_PS);
+
+ return ret;
+}
+
+int rtw_enter_ips(struct rtw_dev *rtwdev)
+{
+ rtw_flag_set(rtwdev, RTW_FLAG_INACTIVE_PS);
+
+ rtw_core_stop(rtwdev);
+
+ return 0;
+}
+
+static void rtw_restore_port_cfg_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+ u32 config = ~0;
+
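+	/* a config mask of ~0 reprograms every port setting for this vif */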
+ rtw_vif_port_config(rtwdev, rtwvif, config);
+}
+
+int rtw_leave_ips(struct rtw_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw_ips_pwr_up(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to leave ips state\n");
+ return ret;
+ }
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_restore_port_cfg_iter, rtwdev);
+
+ return 0;
+}
+
+static void rtw_leave_lps_core(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ conf->state = RTW_ALL_ON;
+ conf->awake_interval = 1;
+ conf->rlbm = 0;
+ conf->smart_ps = 0;
+
+ rtw_fw_set_pwr_mode(rtwdev);
+ rtw_flag_clear(rtwdev, RTW_FLAG_LEISURE_PS);
+}
+
+static void rtw_enter_lps_core(struct rtw_dev *rtwdev)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ conf->state = RTW_RF_OFF;
+ conf->awake_interval = 1;
+ conf->rlbm = 1;
+ conf->smart_ps = 2;
+
+ rtw_fw_set_pwr_mode(rtwdev);
+ rtw_flag_set(rtwdev, RTW_FLAG_LEISURE_PS);
+}
+
+void rtw_lps_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ lps_work.work);
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+ struct rtw_vif *rtwvif = conf->rtwvif;
+
+ if (WARN_ON(!rtwvif))
+ return;
+
+ if (conf->mode == RTW_MODE_LPS)
+ rtw_enter_lps_core(rtwdev);
+ else
+ rtw_leave_lps_core(rtwdev);
+}
+
+void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_LPS;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = true;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+}
+
+void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (!rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_ACTIVE;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = false;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->lps_work, 0);
+}
+
+bool rtw_in_lps(struct rtw_dev *rtwdev)
+{
+ return rtw_flag_check(rtwdev, RTW_FLAG_LEISURE_PS);
+}
+
+void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (WARN_ON(!rtwvif))
+ return;
+
+ if (rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_LPS;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = true;
+
+ rtw_enter_lps_core(rtwdev);
+}
+
+void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif)
+{
+ struct rtw_lps_conf *conf = &rtwdev->lps_conf;
+
+ if (WARN_ON(!rtwvif))
+ return;
+
+ if (!rtwvif->in_lps)
+ return;
+
+ conf->mode = RTW_MODE_ACTIVE;
+ conf->rtwvif = rtwvif;
+ rtwvif->in_lps = false;
+
+ rtw_leave_lps_core(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/ps.h b/drivers/net/wireless/realtek/rtw88/ps.h
new file mode 100644
index 000000000000..09e57405dc1b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/ps.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_PS_H_
+#define __RTW_PS_H_
+
+#define RTW_LPS_THRESHOLD 2
+
+int rtw_enter_ips(struct rtw_dev *rtwdev);
+int rtw_leave_ips(struct rtw_dev *rtwdev);
+
+void rtw_lps_work(struct work_struct *work);
+void rtw_enter_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+void rtw_leave_lps_irqsafe(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+void rtw_enter_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+void rtw_leave_lps(struct rtw_dev *rtwdev, struct rtw_vif *rtwvif);
+bool rtw_in_lps(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
new file mode 100644
index 000000000000..e2628f05812c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_REG_DEF_H__
+#define __RTW_REG_DEF_H__
+
+#define REG_SYS_FUNC_EN 0x0002
+#define BIT_FEN_CPUEN BIT(2)
+#define BIT_FEN_BB_GLB_RST BIT(1)
+#define BIT_FEN_BB_RSTB BIT(0)
+#define REG_SYS_PW_CTRL 0x0004
+#define REG_SYS_CLK_CTRL 0x0008
+#define BIT_CPU_CLK_EN BIT(14)
+
+#define REG_RSV_CTRL 0x001C
+#define DISABLE_PI 0x3
+#define ENABLE_PI 0x2
+#define BITS_RFC_DIRECT (BIT(31) | BIT(30))
+#define BIT_WLMCU_IOIF BIT(0)
+#define REG_RF_CTRL 0x001F
+#define BIT_RF_SDM_RSTB BIT(2)
+#define BIT_RF_RSTB BIT(1)
+#define BIT_RF_EN BIT(0)
+
+#define REG_AFE_CTRL1 0x0024
+#define BIT_MAC_CLK_SEL (BIT(20) | BIT(21))
+#define REG_EFUSE_CTRL 0x0030
+#define BIT_EF_FLAG BIT(31)
+#define BIT_SHIFT_EF_ADDR 8
+#define BIT_MASK_EF_ADDR 0x3ff
+#define BIT_MASK_EF_DATA 0xff
+#define BITS_EF_ADDR (BIT_MASK_EF_ADDR << BIT_SHIFT_EF_ADDR)
+
+#define REG_LDO_EFUSE_CTRL 0x0034
+#define BIT_MASK_EFUSE_BANK_SEL (BIT(8) | BIT(9))
+
+#define REG_GPIO_MUXCFG 0x0040
+#define BIT_FSPI_EN BIT(19)
+#define BIT_WLRFE_4_5_EN BIT(2)
+
+#define REG_LED_CFG 0x004C
+#define BIT_LNAON_SEL_EN BIT(26)
+#define BIT_PAPE_SEL_EN BIT(25)
+#define REG_PAD_CTRL1 0x0064
+#define BIT_PAPE_WLBT_SEL BIT(29)
+#define BIT_LNAON_WLBT_SEL BIT(28)
+#define REG_WL_BT_PWR_CTRL 0x0068
+#define BIT_BT_FUNC_EN BIT(18)
+#define BIT_BT_DIG_CLK_EN BIT(8)
+#define REG_HCI_OPT_CTRL 0x0074
+
+#define REG_MCUFW_CTRL 0x0080
+#define BIT_ANA_PORT_EN BIT(22)
+#define BIT_MAC_PORT_EN BIT(21)
+#define BIT_BOOT_FSPI_EN BIT(20)
+#define BIT_FW_INIT_RDY BIT(15)
+#define BIT_FW_DW_RDY BIT(14)
+#define BIT_RPWM_TOGGLE BIT(7)
+#define BIT_DMEM_CHKSUM_OK BIT(6)
+#define BIT_DMEM_DW_OK BIT(5)
+#define BIT_IMEM_CHKSUM_OK BIT(4)
+#define BIT_IMEM_DW_OK BIT(3)
+#define BIT_IMEM_BOOT_LOAD_CHECKSUM_OK BIT(2)
+#define BIT_MCUFWDL_EN BIT(0)
+#define BIT_CHECK_SUM_OK (BIT(4) | BIT(6))
+#define FW_READY (BIT_FW_INIT_RDY | BIT_FW_DW_RDY | \
+ BIT_IMEM_DW_OK | BIT_DMEM_DW_OK | \
+ BIT_CHECK_SUM_OK)
+#define FW_READY_MASK 0xffff
+
+#define REG_SYS_CFG1 0x00F0
+#define BIT_RTL_ID BIT(23)
+#define BIT_RF_TYPE_ID BIT(27)
+#define BIT_SHIFT_VENDOR_ID 16
+#define BIT_MASK_VENDOR_ID 0xf
+#define BIT_VENDOR_ID(x) (((x) & BIT_MASK_VENDOR_ID) << BIT_SHIFT_VENDOR_ID)
+#define BITS_VENDOR_ID (BIT_MASK_VENDOR_ID << BIT_SHIFT_VENDOR_ID)
+#define BIT_CLEAR_VENDOR_ID(x) ((x) & (~BITS_VENDOR_ID))
+#define BIT_GET_VENDOR_ID(x) (((x) >> BIT_SHIFT_VENDOR_ID) & BIT_MASK_VENDOR_ID)
+#define BIT_SHIFT_CHIP_VER 12
+#define BIT_MASK_CHIP_VER 0xf
+#define BIT_CHIP_VER(x) (((x) & BIT_MASK_CHIP_VER) << BIT_SHIFT_CHIP_VER)
+#define BITS_CHIP_VER (BIT_MASK_CHIP_VER << BIT_SHIFT_CHIP_VER)
+#define BIT_CLEAR_CHIP_VER(x) ((x) & (~BITS_CHIP_VER))
+#define BIT_GET_CHIP_VER(x) (((x) >> BIT_SHIFT_CHIP_VER) & BIT_MASK_CHIP_VER)
+#define REG_SYS_STATUS1 0x00F4
+#define REG_SYS_STATUS2 0x00F8
+#define REG_SYS_CFG2 0x00FC
+#define REG_WLRF1 0x00EC
+#define BIT_WLRF1_BBRF_EN (BIT(24) | BIT(25) | BIT(26))
+#define REG_CR 0x0100
+#define BIT_32K_CAL_TMR_EN BIT(10)
+#define BIT_MAC_SEC_EN BIT(9)
+#define BIT_ENSWBCN BIT(8)
+#define BIT_MACRXEN BIT(7)
+#define BIT_MACTXEN BIT(6)
+#define BIT_SCHEDULE_EN BIT(5)
+#define BIT_PROTOCOL_EN BIT(4)
+#define BIT_RXDMA_EN BIT(3)
+#define BIT_TXDMA_EN BIT(2)
+#define BIT_HCI_RXDMA_EN BIT(1)
+#define BIT_HCI_TXDMA_EN BIT(0)
+#define MAC_TRX_ENABLE (BIT_HCI_TXDMA_EN | BIT_HCI_RXDMA_EN | BIT_TXDMA_EN | \
+ BIT_RXDMA_EN | BIT_PROTOCOL_EN | BIT_SCHEDULE_EN | \
+ BIT_MACTXEN | BIT_MACRXEN)
+#define BIT_SHIFT_TXDMA_VOQ_MAP 4
+#define BIT_MASK_TXDMA_VOQ_MAP 0x3
+#define BIT_TXDMA_VOQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_VOQ_MAP) << BIT_SHIFT_TXDMA_VOQ_MAP)
+#define BIT_SHIFT_TXDMA_VIQ_MAP 6
+#define BIT_MASK_TXDMA_VIQ_MAP 0x3
+#define BIT_TXDMA_VIQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_VIQ_MAP) << BIT_SHIFT_TXDMA_VIQ_MAP)
+#define REG_TXDMA_PQ_MAP 0x010C
+#define BIT_SHIFT_TXDMA_BEQ_MAP 8
+#define BIT_MASK_TXDMA_BEQ_MAP 0x3
+#define BIT_TXDMA_BEQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_BEQ_MAP) << BIT_SHIFT_TXDMA_BEQ_MAP)
+#define BIT_SHIFT_TXDMA_BKQ_MAP 10
+#define BIT_MASK_TXDMA_BKQ_MAP 0x3
+#define BIT_TXDMA_BKQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_BKQ_MAP) << BIT_SHIFT_TXDMA_BKQ_MAP)
+#define BIT_SHIFT_TXDMA_MGQ_MAP 12
+#define BIT_MASK_TXDMA_MGQ_MAP 0x3
+#define BIT_TXDMA_MGQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_MGQ_MAP) << BIT_SHIFT_TXDMA_MGQ_MAP)
+#define BIT_SHIFT_TXDMA_HIQ_MAP 14
+#define BIT_MASK_TXDMA_HIQ_MAP 0x3
+#define BIT_TXDMA_HIQ_MAP(x) \
+ (((x) & BIT_MASK_TXDMA_HIQ_MAP) << BIT_SHIFT_TXDMA_HIQ_MAP)
+#define BIT_SHIFT_TXSC_40M 4
+#define BIT_MASK_TXSC_40M 0xf
+#define BIT_TXSC_40M(x) \
+ (((x) & BIT_MASK_TXSC_40M) << BIT_SHIFT_TXSC_40M)
+#define BIT_SHIFT_TXSC_20M 0
+#define BIT_MASK_TXSC_20M 0xf
+#define BIT_TXSC_20M(x) \
+ (((x) & BIT_MASK_TXSC_20M) << BIT_SHIFT_TXSC_20M)
+#define BIT_SHIFT_MAC_CLK_SEL 20
+#define MAC_CLK_HW_DEF_80M 0
+#define MAC_CLK_HW_DEF_40M 1
+#define MAC_CLK_HW_DEF_20M 2
+#define MAC_CLK_SPEED 80
+
+#define REG_TRXFF_BNDY 0x0114
+#define REG_RXFF_BNDY 0x011C
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_C2HEVT 0x01A0
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX0 0x01D0
+#define REG_HMEBOX1 0x01D4
+#define REG_HMEBOX2 0x01D8
+#define REG_HMEBOX3 0x01DC
+#define REG_HMEBOX0_EX 0x01F0
+#define REG_HMEBOX1_EX 0x01F4
+#define REG_HMEBOX2_EX 0x01F8
+#define REG_HMEBOX3_EX 0x01FC
+
+#define REG_FIFOPAGE_CTRL_2 0x0204
+#define BIT_BCN_VALID_V1 BIT(15)
+#define BIT_MASK_BCN_HEAD_1_V1 0xfff
+#define REG_AUTO_LLT_V1 0x0208
+#define BIT_AUTO_INIT_LLT_V1 BIT(0)
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define BTI_PAGE_OVF BIT(2)
+#define REG_RQPN_CTRL_1 0x0228
+#define REG_RQPN_CTRL_2 0x022C
+#define BIT_LD_RQPN BIT(31)
+#define REG_FIFOPAGE_INFO_1 0x0230
+#define REG_FIFOPAGE_INFO_2 0x0234
+#define REG_FIFOPAGE_INFO_3 0x0238
+#define REG_FIFOPAGE_INFO_4 0x023C
+#define REG_FIFOPAGE_INFO_5 0x0240
+#define REG_H2C_HEAD 0x0244
+#define REG_H2C_TAIL 0x0248
+#define REG_H2C_READ_ADDR 0x024C
+#define REG_H2C_INFO 0x0254
+
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define BIT_EN_BCNQ_DL BIT(22)
+#define BIT_EN_WR_FREE_TAIL BIT(20)
+#define REG_BCNQ_BDNY_V1 0x0424
+#define REG_LIFETIME_EN 0x0426
+#define BIT_BA_PARSER_EN BIT(5)
+#define REG_SPEC_SIFS 0x0428
+#define REG_DARFRC 0x0430
+#define REG_DARFRCH 0x0434
+#define REG_RARFRCH 0x043C
+#define REG_ARFR0 0x0444
+#define REG_ARFRH0 0x0448
+#define REG_ARFR1_V1 0x044C
+#define REG_ARFRH1_V1 0x0450
+#define REG_CCK_CHECK 0x0454
+#define BIT_CHECK_CCK_EN BIT(7)
+#define REG_AMPDU_MAX_TIME_V1 0x0455
+#define REG_BCNQ1_BDNY_V1 0x0456
+#define REG_TX_HANG_CTRL 0x045E
+#define BIT_EN_EOF_V1 BIT(2)
+#define REG_DATA_SC 0x0483
+#define REG_ARFR4 0x049C
+#define REG_ARFRH4 0x04A0
+#define REG_ARFR5 0x04A4
+#define REG_ARFRH5 0x04A8
+#define REG_SW_AMPDU_BURST_MODE_CTRL 0x04BC
+#define BIT_PRE_TX_CMD BIT(6)
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_PRECNT_CTRL 0x04E5
+#define BIT_EN_PRECNT BIT(11)
+
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_PIFS 0x0512
+#define REG_SIFS 0x0514
+#define BIT_SHIFT_SIFS_OFDM_CTX 8
+#define BIT_SHIFT_SIFS_CCK_TRX 16
+#define BIT_SHIFT_SIFS_OFDM_TRX 24
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define BIT_SIFS_BK_EN BIT(12)
+#define REG_TXPAUSE 0x0522
+#define REG_RD_CTRL 0x0524
+#define BIT_DIS_TXOP_CFE BIT(10)
+#define BIT_DIS_LSIG_CFE BIT(9)
+#define BIT_DIS_STBC_CFE BIT(8)
+#define REG_TBTT_PROHIBIT 0x0540
+#define BIT_SHIFT_TBTT_HOLD_TIME_AP 8
+#define REG_RD_NAV_NXT 0x0544
+#define REG_BCN_CTRL 0x0550
+#define BIT_DIS_TSF_UDT BIT(4)
+#define BIT_EN_BCN_FUNCTION BIT(3)
+#define REG_BCN_CTRL_CLINT0 0x0551
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_USTIME_TSF 0x055C
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_MISC_CTRL 0x0577
+#define BIT_EN_FREE_CNT BIT(3)
+#define BIT_DIS_SECOND_CCA (BIT(0) | BIT(1))
+#define REG_TIMER0_SRC_SEL 0x05B4
+#define BIT_TSFT_SEL_TIMER0 (BIT(4) | BIT(5) | BIT(6))
+
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define BIT_APP_FCS BIT(31)
+#define BIT_APP_MIC BIT(30)
+#define BIT_APP_ICV BIT(29)
+#define BIT_APP_PHYSTS BIT(28)
+#define BIT_APP_BASSN BIT(27)
+#define BIT_VHT_DACK BIT(26)
+#define BIT_TCPOFLD_EN BIT(25)
+#define BIT_ENMBID BIT(24)
+#define BIT_LSIGEN BIT(23)
+#define BIT_MFBEN BIT(22)
+#define BIT_DISCHKPPDLLEN BIT(21)
+#define BIT_PKTCTL_DLEN BIT(20)
+#define BIT_TIM_PARSER_EN BIT(18)
+#define BIT_BC_MD_EN BIT(17)
+#define BIT_UC_MD_EN BIT(16)
+#define BIT_RXSK_PERPKT BIT(15)
+#define BIT_HTC_LOC_CTRL BIT(14)
+#define BIT_RPFM_CAM_ENABLE BIT(12)
+#define BIT_TA_BCN BIT(11)
+#define BIT_DISDECMYPKT BIT(10)
+#define BIT_AICV BIT(9)
+#define BIT_ACRC32 BIT(8)
+#define BIT_CBSSID_BCN BIT(7)
+#define BIT_CBSSID_DATA BIT(6)
+#define BIT_APWRMGT BIT(5)
+#define BIT_ADD3 BIT(4)
+#define BIT_AB BIT(3)
+#define BIT_AM BIT(2)
+#define BIT_APM BIT(1)
+#define BIT_AAP BIT(0)
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DRVINFO_SZ 0x060F
+#define REG_USTIME_EDCA 0x0638
+#define REG_ACKTO_CCK 0x0639
+#define REG_RESP_SIFS_CCK 0x063C
+#define REG_RESP_SIFS_OFDM 0x063E
+#define REG_ACKTO 0x0640
+#define REG_EIFS 0x0642
+#define REG_NAV_CTRL 0x0650
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+#define BIT_RFMOD (BIT(7) | BIT(8))
+#define BIT_RFMOD_80M BIT(8)
+#define BIT_RFMOD_40M BIT(7)
+#define REG_WMAC_TRXPTCL_CTL_H 0x066C
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BBPSF_CTRL 0x06DC
+
+#define REG_WMAC_OPTION_FUNCTION 0x07D0
+#define REG_WMAC_OPTION_FUNCTION_1 0x07D4
+
+#define REG_ANAPAR_XTAL_0 0x1040
+#define REG_CPU_DMEM_CON 0x1080
+#define BIT_WL_PLATFORM_RST BIT(16)
+#define BIT_WL_SECURITY_CLK BIT(15)
+#define BIT_DDMA_EN BIT(8)
+
+#define REG_H2C_PKT_READADDR 0x10D0
+#define REG_H2C_PKT_WRITEADDR 0x10D4
+#define REG_FW_DBG7 0x10FC
+#define FW_KEY_MASK 0xffffff00
+
+#define REG_CR_EXT 0x1100
+
+#define REG_DDMA_CH0SA 0x1200
+#define REG_DDMA_CH0DA 0x1204
+#define REG_DDMA_CH0CTRL 0x1208
+#define BIT_DDMACH0_OWN BIT(31)
+#define BIT_DDMACH0_CHKSUM_EN BIT(29)
+#define BIT_DDMACH0_CHKSUM_STS BIT(27)
+#define BIT_DDMACH0_RESET_CHKSUM_STS BIT(25)
+#define BIT_DDMACH0_CHKSUM_CONT BIT(24)
+#define BIT_MASK_DDMACH0_DLEN 0x3ffff
+
+#define REG_H2CQ_CSR 0x1330
+#define BIT_H2CQ_FULL BIT(31)
+#define REG_FAST_EDCA_VOVI_SETTING 0x1448
+#define REG_FAST_EDCA_BEBK_SETTING 0x144C
+
+#define REG_RXPSF_CTRL 0x1610
+#define BIT_RXGCK_FIFOTHR_EN BIT(28)
+
+#define BIT_SHIFT_RXGCK_VHT_FIFOTHR 26
+#define BIT_MASK_RXGCK_VHT_FIFOTHR 0x3
+#define BIT_RXGCK_VHT_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_VHT_FIFOTHR) << BIT_SHIFT_RXGCK_VHT_FIFOTHR)
+#define BITS_RXGCK_VHT_FIFOTHR \
+ (BIT_MASK_RXGCK_VHT_FIFOTHR << BIT_SHIFT_RXGCK_VHT_FIFOTHR)
+
+#define BIT_SHIFT_RXGCK_HT_FIFOTHR 24
+#define BIT_MASK_RXGCK_HT_FIFOTHR 0x3
+#define BIT_RXGCK_HT_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_HT_FIFOTHR) << BIT_SHIFT_RXGCK_HT_FIFOTHR)
+#define BITS_RXGCK_HT_FIFOTHR \
+ (BIT_MASK_RXGCK_HT_FIFOTHR << BIT_SHIFT_RXGCK_HT_FIFOTHR)
+
+#define BIT_SHIFT_RXGCK_OFDM_FIFOTHR 22
+#define BIT_MASK_RXGCK_OFDM_FIFOTHR 0x3
+#define BIT_RXGCK_OFDM_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_OFDM_FIFOTHR) << BIT_SHIFT_RXGCK_OFDM_FIFOTHR)
+#define BITS_RXGCK_OFDM_FIFOTHR \
+ (BIT_MASK_RXGCK_OFDM_FIFOTHR << BIT_SHIFT_RXGCK_OFDM_FIFOTHR)
+
+#define BIT_SHIFT_RXGCK_CCK_FIFOTHR 20
+#define BIT_MASK_RXGCK_CCK_FIFOTHR 0x3
+#define BIT_RXGCK_CCK_FIFOTHR(x) \
+ (((x) & BIT_MASK_RXGCK_CCK_FIFOTHR) << BIT_SHIFT_RXGCK_CCK_FIFOTHR)
+#define BITS_RXGCK_CCK_FIFOTHR \
+ (BIT_MASK_RXGCK_CCK_FIFOTHR << BIT_SHIFT_RXGCK_CCK_FIFOTHR)
+
+#define BIT_RXGCK_OFDMCCA_EN BIT(16)
+
+#define BIT_SHIFT_RXPSF_PKTLENTHR 13
+#define BIT_MASK_RXPSF_PKTLENTHR 0x7
+#define BIT_RXPSF_PKTLENTHR(x) \
+ (((x) & BIT_MASK_RXPSF_PKTLENTHR) << BIT_SHIFT_RXPSF_PKTLENTHR)
+#define BITS_RXPSF_PKTLENTHR \
+ (BIT_MASK_RXPSF_PKTLENTHR << BIT_SHIFT_RXPSF_PKTLENTHR)
+#define BIT_CLEAR_RXPSF_PKTLENTHR(x) ((x) & (~BITS_RXPSF_PKTLENTHR))
+#define BIT_SET_RXPSF_PKTLENTHR(x, v) \
+ (BIT_CLEAR_RXPSF_PKTLENTHR(x) | BIT_RXPSF_PKTLENTHR(v))
+
+#define BIT_RXPSF_CTRLEN BIT(12)
+#define BIT_RXPSF_VHTCHKEN BIT(11)
+#define BIT_RXPSF_HTCHKEN BIT(10)
+#define BIT_RXPSF_OFDMCHKEN BIT(9)
+#define BIT_RXPSF_CCKCHKEN BIT(8)
+#define BIT_RXPSF_OFDMRST BIT(7)
+#define BIT_RXPSF_CCKRST BIT(6)
+#define BIT_RXPSF_MHCHKEN BIT(5)
+#define BIT_RXPSF_CONT_ERRCHKEN BIT(4)
+#define BIT_RXPSF_ALL_ERRCHKEN BIT(3)
+
+#define BIT_SHIFT_RXPSF_ERRTHR 0
+#define BIT_MASK_RXPSF_ERRTHR 0x7
+#define BIT_RXPSF_ERRTHR(x) \
+ (((x) & BIT_MASK_RXPSF_ERRTHR) << BIT_SHIFT_RXPSF_ERRTHR)
+#define BITS_RXPSF_ERRTHR (BIT_MASK_RXPSF_ERRTHR << BIT_SHIFT_RXPSF_ERRTHR)
+#define BIT_CLEAR_RXPSF_ERRTHR(x) ((x) & (~BITS_RXPSF_ERRTHR))
+#define BIT_GET_RXPSF_ERRTHR(x) \
+ (((x) >> BIT_SHIFT_RXPSF_ERRTHR) & BIT_MASK_RXPSF_ERRTHR)
+#define BIT_SET_RXPSF_ERRTHR(x, v) \
+ (BIT_CLEAR_RXPSF_ERRTHR(x) | BIT_RXPSF_ERRTHR(v))
+
+#define REG_RXPSF_TYPE_CTRL 0x1614
+#define REG_GENERAL_OPTION 0x1664
+#define BIT_DUMMY_FCS_READY_MASK_EN BIT(9)
+
+#define REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1 0x1700
+#define REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1 0x1704
+#define REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1 0x1708
+#define LTECOEX_READY BIT(29)
+#define LTECOEX_ACCESS_CTRL REG_WL2LTECOEX_INDIRECT_ACCESS_CTRL_V1
+#define LTECOEX_WRITE_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_WRITE_DATA_V1
+#define LTECOEX_READ_DATA REG_WL2LTECOEX_INDIRECT_ACCESS_READ_DATA_V1
+
+#define RF_DTXLOK 0x08
+#define RF_CFGCH 0x18
+#define RF_LUTWA 0x33
+#define RF_LUTWD1 0x3e
+#define RF_LUTWD0 0x3f
+#define RF_XTALX2 0xb8
+#define RF_MALSEL 0xbe
+#define RF_LUTDBG 0xdf
+#define RF_LUTWE2 0xee
+#define RF_LUTWE 0xef
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
new file mode 100644
index 000000000000..e7750a833a8e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "regd.h"
+#include "debug.h"
+#include "phy.h"
+
+#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \
+ {.alpha2 = (_alpha2), \
+ .chplan = (_chplan), \
+ .txpwr_regd = (_txpwr_regd) \
+ }
+
+/* If country code is not correctly defined in efuse,
+ * use worldwide country code and txpwr regd.
+ */
+static const struct rtw_regulatory rtw_defined_chplan =
+ COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW);
+
+static const struct rtw_regulatory all_chplan_map[] = {
+ COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC1_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
+ COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC2, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
+ COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+ COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+};
+
+static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ enum nl80211_band band;
+ struct ieee80211_supported_band *sband;
+ const struct ieee80211_reg_rule *reg_rule;
+ struct ieee80211_channel *ch;
+ unsigned int i;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!wiphy->bands[band])
+ continue;
+
+ sband = wiphy->bands[band];
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ reg_rule = freq_reg_info(wiphy,
+ MHZ_TO_KHZ(ch->center_freq));
+ if (IS_ERR(reg_rule))
+ continue;
+
+ ch->flags &= ~IEEE80211_CHAN_DISABLED;
+
+ if (!(reg_rule->flags & NL80211_RRF_NO_IR))
+ ch->flags &= ~IEEE80211_CHAN_NO_IR;
+ }
+ }
+}
+
+static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ int i;
+
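+	/* nothing to restrict if the hardware is 80MHz capable */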
+ if (efuse->hw_cap.bw & BIT(RTW_CHANNEL_WIDTH_80))
+ return;
+
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
+ if (!sband)
+ goto out_5g;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ ch->flags |= IEEE80211_CHAN_NO_80MHZ;
+ }
+
+out_5g:
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ ch->flags |= IEEE80211_CHAN_NO_80MHZ;
+ }
+}
+
+static void rtw_regd_apply_world_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ rtw_regd_apply_beaconing_flags(wiphy, initiator);
+}
+
+static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) {
+ if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2))
+ return all_chplan_map[i];
+ }
+
+ return rtw_defined_chplan;
+}
+
+static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev,
+ struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER)
+ return 0;
+ rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2);
+ rtw_regd_apply_world_flags(wiphy, request->initiator);
+
+ return 0;
+}
+
+static int
+rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ wiphy->reg_notifier = reg_notifier;
+
+ wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
+
+ rtw_regd_apply_hw_cap_flags(wiphy);
+
+ return 0;
+}
+
+int rtw_regd_init(struct rtw_dev *rtwdev,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ if (!wiphy)
+ return -EINVAL;
+
+ rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code);
+ rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier);
+
+ return 0;
+}
+
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtw_dev *rtwdev = hw->priv;
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ rtw_regd_notifier_apply(rtwdev, wiphy, request);
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n",
+ request->alpha2[0], request->alpha2[1], request->initiator,
+ rtwdev->regd.chplan, rtwdev->regd.txpwr_regd);
+
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
new file mode 100644
index 000000000000..7784bb6d3ba7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_REGD_H_
+#define __RTW_REGD_H_
+
+#define IEEE80211_CHAN_NO_IBSS IEEE80211_CHAN_NO_IR
+#define IEEE80211_CHAN_PASSIVE_SCAN IEEE80211_CHAN_NO_IR
+enum rtw_chplan_id {
+ RTW_CHPLAN_WORLD_ETSI1 = 0x26,
+ RTW_CHPLAN_MKK1_MKK1 = 0x27,
+ RTW_CHPLAN_IC1_IC2 = 0x2B,
+ RTW_CHPLAN_WORLD_CHILE1 = 0x2D,
+ RTW_CHPLAN_WORLD_FCC3 = 0x30,
+ RTW_CHPLAN_WORLD_FCC5 = 0x32,
+ RTW_CHPLAN_FCC1_FCC7 = 0x34,
+ RTW_CHPLAN_WORLD_ETSI3 = 0x36,
+ RTW_CHPLAN_ETSI1_ETSI12 = 0x3D,
+ RTW_CHPLAN_KCC1_KCC2 = 0x3E,
+ RTW_CHPLAN_ETSI1_ETSI4 = 0x42,
+ RTW_CHPLAN_FCC1_NCC3 = 0x44,
+ RTW_CHPLAN_WORLD_ACMA1 = 0x45,
+ RTW_CHPLAN_WORLD_ETSI6 = 0x47,
+ RTW_CHPLAN_WORLD_ETSI7 = 0x48,
+ RTW_CHPLAN_WORLD_ETSI8 = 0x49,
+ RTW_CHPLAN_WORLD_ETSI10 = 0x51,
+ RTW_CHPLAN_WORLD_ETSI14 = 0x59,
+ RTW_CHPLAN_FCC2_FCC7 = 0x61,
+ RTW_CHPLAN_FCC2_FCC1 = 0x62,
+ RTW_CHPLAN_WORLD_FCC7 = 0x73,
+ RTW_CHPLAN_FCC2_FCC17 = 0x74,
+ RTW_CHPLAN_WORLD_ETSI20 = 0x75,
+ RTW_CHPLAN_FCC2_FCC11 = 0x76,
+ RTW_CHPLAN_REALTEK_DEFINE = 0x7f,
+};
+
+struct country_code_to_enum_rd {
+ u16 countrycode;
+ const char *iso_name;
+};
+
+enum country_code_type {
+ COUNTRY_CODE_FCC = 0,
+ COUNTRY_CODE_IC = 1,
+ COUNTRY_CODE_ETSI = 2,
+ COUNTRY_CODE_SPAIN = 3,
+ COUNTRY_CODE_FRANCE = 4,
+ COUNTRY_CODE_MKK = 5,
+ COUNTRY_CODE_MKK1 = 6,
+ COUNTRY_CODE_ISRAEL = 7,
+ COUNTRY_CODE_TELEC = 8,
+ COUNTRY_CODE_MIC = 9,
+ COUNTRY_CODE_GLOBAL_DOMAIN = 10,
+ COUNTRY_CODE_WORLD_WIDE_13 = 11,
+ COUNTRY_CODE_TELEC_NETGEAR = 12,
+ COUNTRY_CODE_WORLD_WIDE_13_5G_ALL = 13,
+
+ /* new channel plan above this */
+ COUNTRY_CODE_MAX
+};
+
+int rtw_regd_init(struct rtw_dev *rtwdev,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request));
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
new file mode 100644
index 000000000000..1172f6c0605b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8822b.h"
+#include "rtw8822b_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+
+static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
+
+static void rtw8822be_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8822b_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static int rtw8822b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8822b_efuse *map;
+ int i;
+
+ map = (struct rtw8822b_efuse *)log_map;
+
+ efuse->rfe_option = map->rfe_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+ efuse->pa_type_5g = map->pa_type;
+ efuse->lna_type_2g = map->lna_type_2g[0];
+ efuse->lna_type_5g = map->lna_type_5g[0];
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8822be_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtw8822b_phy_rfe_init(struct rtw_dev *rtwdev)
+{
+ /* chip top mux */
+ rtw_write32_mask(rtwdev, 0x64, BIT(29) | BIT(28), 0x3);
+ rtw_write32_mask(rtwdev, 0x4c, BIT(26) | BIT(25), 0x0);
+ rtw_write32_mask(rtwdev, 0x40, BIT(2), 0x1);
+
+ /* from s0 or s1 */
+ rtw_write32_mask(rtwdev, 0x1990, 0x3f, 0x30);
+ rtw_write32_mask(rtwdev, 0x1990, (BIT(11) | BIT(10)), 0x3);
+
+ /* input or output */
+ rtw_write32_mask(rtwdev, 0x974, 0x3f, 0x3f);
+ rtw_write32_mask(rtwdev, 0x974, (BIT(11) | BIT(10)), 0x3);
+}
+
+static void rtw8822b_phy_set_param(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 crystal_cap;
+ bool is_tx2_path;
+
+ /* power on BB/RF domain */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_BB_RSTB | BIT_FEN_BB_GLB_RST);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN);
+
+	/* pre-init before configuring from the parameter table files */
+ rtw_write32_clr(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+ rtw_phy_load_tables(rtwdev);
+
+ crystal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ rtw_write32_mask(rtwdev, 0x24, 0x7e000000, crystal_cap);
+ rtw_write32_mask(rtwdev, 0x28, 0x7e, crystal_cap);
+
+	/* post-init after configuring from the parameter table files */
+ rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST);
+
+ is_tx2_path = false;
+ rtw8822b_config_trx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx,
+ is_tx2_path);
+ rtw_phy_init(rtwdev);
+
+ rtw8822b_phy_rfe_init(rtwdev);
+
+ /* wifi path controller */
+ rtw_write32_mask(rtwdev, 0x70, 0x4000000, 1);
+ /* BB control */
+ rtw_write32_mask(rtwdev, 0x4c, 0x01800000, 0x2);
+ /* antenna mux switch */
+ rtw_write8(rtwdev, 0x974, 0xff);
+ rtw_write32_mask(rtwdev, 0x1990, 0x300, 0);
+ rtw_write32_mask(rtwdev, 0xcbc, 0x80000, 0x0);
+ /* SW control */
+ rtw_write8(rtwdev, 0xcb4, 0x77);
+ /* switch to WL side controller and gnt_wl gnt_bt debug signal */
+ rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
+ /* gnt_wl = 1, gnt_bt = 0 */
+ rtw_write32(rtwdev, 0x1704, 0x7700);
+ rtw_write32(rtwdev, 0x1700, 0xc00f0038);
+ /* switch for WL 2G */
+ rtw_write8(rtwdev, 0xcbd, 0x2);
+}
+
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_PIFS_TIME 0x19
+#define WLAN_SIFS_CCK_CONT_TX 0xA
+#define WLAN_SIFS_OFDM_CONT_TX 0xE
+#define WLAN_SIFS_CCK_TRX 0x10
+#define WLAN_SIFS_OFDM_TRX 0x10
+#define WLAN_VO_TXOP_LIMIT 0x186 /* unit : 32us */
+#define WLAN_VI_TXOP_LIMIT 0x3BC /* unit : 32us */
+#define WLAN_RDG_NAV 0x05
+#define WLAN_TXOP_NAV 0x1B
+#define WLAN_CCK_RX_TSF 0x30
+#define WLAN_OFDM_RX_TSF 0x30
+#define WLAN_TBTT_PROHIBIT 0x04 /* unit : 32us */
+#define WLAN_TBTT_HOLD_TIME 0x064 /* unit : 32us */
+#define WLAN_DRV_EARLY_INT 0x04
+#define WLAN_BCN_DMA_TIME 0x02
+
+#define WLAN_RX_FILTER0 0x0FFFFFFF
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0xE400220E
+#define WLAN_RXPKT_MAX_SZ 12288
+#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9)
+
+#define WLAN_AMPDU_MAX_TIME 0x70
+#define WLAN_RTS_LEN_TH 0xFF
+#define WLAN_RTS_TX_TIME_TH 0x08
+#define WLAN_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define FAST_EDCA_VO_TH 0x06
+#define FAST_EDCA_VI_TH 0x06
+#define FAST_EDCA_BE_TH 0x06
+#define FAST_EDCA_BK_TH 0x06
+#define WLAN_BAR_RETRY_LIMIT 0x01
+#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08
+
+#define WLAN_TX_FUNC_CFG1 0x30
+#define WLAN_TX_FUNC_CFG2 0x30
+#define WLAN_MAC_OPT_NORM_FUNC1 0x98
+#define WLAN_MAC_OPT_LB_FUNC1 0x80
+#define WLAN_MAC_OPT_FUNC2 0x30810041
+
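+/* Composite register values assembled from the individual timing fields above */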
+#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
+ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
+ (WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
+ (WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
+
+#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
+ (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
+
+#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
+#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8)
+
+static int rtw8822b_mac_init(struct rtw_dev *rtwdev)
+{
+ u32 value32;
+
+ /* protocol configuration */
+ rtw_write8_clr(rtwdev, REG_SW_AMPDU_BURST_MODE_CTRL, BIT_PRE_TX_CMD);
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
+ rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
+ value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
+ (WLAN_MAX_AGG_PKT_LIMIT << 16) |
+ (WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
+ rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
+ rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
+ WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
+ /* EDCA configuration */
+ rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
+ rtw_write16(rtwdev, REG_TXPAUSE, 0x0000);
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
+ rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
+ rtw_write16(rtwdev, REG_EDCA_VO_PARAM + 2, WLAN_VO_TXOP_LIMIT);
+ rtw_write16(rtwdev, REG_EDCA_VI_PARAM + 2, WLAN_VI_TXOP_LIMIT);
+ rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
+ rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
+	/* Set beacon control - enable TSF and other related functions */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+	/* Set beacon transmission related registers */
+ rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
+ rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
+ rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+ rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
+ /* WMAC configuration */
+ rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
+ rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
+ rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
+ rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
+ rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION + 4, WLAN_MAC_OPT_NORM_FUNC1);
+
+ return 0;
+}
+
+static void rtw8822b_set_channel_rfe_efem(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+	bool is_channel_2g = (channel <= 14);
+
+ if (is_channel_2g) {
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x705770);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
+ rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(4), 0);
+ } else {
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x177517);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x75);
+ rtw_write32s_mask(rtwdev, REG_RFECTL, BIT(5), 0);
+ }
+
+ rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
+
+ if (hal->antenna_rx == BB_PATH_AB ||
+ hal->antenna_tx == BB_PATH_AB) {
+ /* 2TX or 2RX */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa501);
+ } else if (hal->antenna_rx == hal->antenna_tx) {
+ /* TXA+RXA or TXB+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa500);
+ } else {
+ /* TXB+RXA or TXA+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa005);
+ }
+}
+
+static void rtw8822b_set_channel_rfe_ifem(struct rtw_dev *rtwdev, u8 channel)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+	bool is_channel_2g = (channel <= 14);
+
+ if (is_channel_2g) {
+ /* signal source */
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x745774);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x57);
+ } else {
+ /* signal source */
+ rtw_write32s_mask(rtwdev, REG_RFESEL0, 0xffffff, 0x477547);
+ rtw_write32s_mask(rtwdev, REG_RFESEL8, MASKBYTE1, 0x75);
+ }
+
+ rtw_write32s_mask(rtwdev, REG_RFEINV, BIT(11) | BIT(10) | 0x3f, 0x0);
+
+ if (is_channel_2g) {
+ if (hal->antenna_rx == BB_PATH_AB ||
+ hal->antenna_tx == BB_PATH_AB) {
+ /* 2TX or 2RX */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa501);
+ } else if (hal->antenna_rx == hal->antenna_tx) {
+ /* TXA+RXA or TXB+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa500);
+ } else {
+ /* TXB+RXA or TXA+RXB */
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa005);
+ }
+ } else {
+ rtw_write32s_mask(rtwdev, REG_TRSW, MASKLWORD, 0xa5a5);
+ }
+}
+
+enum {
+ CCUT_IDX_1R_2G,
+ CCUT_IDX_2R_2G,
+ CCUT_IDX_1R_5G,
+ CCUT_IDX_2R_5G,
+ CCUT_IDX_NR,
+};
+
+struct cca_ccut {
+ u32 reg82c[CCUT_IDX_NR];
+ u32 reg830[CCUT_IDX_NR];
+ u32 reg838[CCUT_IDX_NR];
+};
+
+static const struct cca_ccut cca_ifem_ccut = {
+ {0x75C97010, 0x75C97010, 0x75C97010, 0x75C97010}, /*Reg82C*/
+ {0x79a0eaaa, 0x79A0EAAC, 0x79a0eaaa, 0x79a0eaaa}, /*Reg830*/
+ {0x87765541, 0x87746341, 0x87765541, 0x87746341}, /*Reg838*/
+};
+
+static const struct cca_ccut cca_efem_ccut = {
+ {0x75B86010, 0x75B76010, 0x75B86010, 0x75B76010}, /*Reg82C*/
+ {0x79A0EAA8, 0x79A0EAAC, 0x79A0EAA8, 0x79a0eaaa}, /*Reg830*/
+ {0x87766451, 0x87766431, 0x87766451, 0x87766431}, /*Reg838*/
+};
+
+static const struct cca_ccut cca_ifem_ccut_ext = {
+ {0x75da8010, 0x75da8010, 0x75da8010, 0x75da8010}, /*Reg82C*/
+ {0x79a0eaaa, 0x97A0EAAC, 0x79a0eaaa, 0x79a0eaaa}, /*Reg830*/
+ {0x87765541, 0x86666341, 0x87765561, 0x86666361}, /*Reg838*/
+};
+
+static void rtw8822b_get_cca_val(const struct cca_ccut *cca_ccut, u8 col,
+ u32 *reg82c, u32 *reg830, u32 *reg838)
+{
+ *reg82c = cca_ccut->reg82c[col];
+ *reg830 = cca_ccut->reg830[col];
+ *reg838 = cca_ccut->reg838[col];
+}
+
+struct rtw8822b_rfe_info {
+ const struct cca_ccut *cca_ccut_2g;
+ const struct cca_ccut *cca_ccut_5g;
+ enum rtw_rfe_fem fem;
+ bool ifem_ext;
+ void (*rtw_set_channel_rfe)(struct rtw_dev *rtwdev, u8 channel);
+};
+
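+/* Helpers to declare rtw8822b_rfe_info entries: per-band CCA thresholds plus
+ * the set_channel_rfe handler matching the front-end module type.
+ */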
+#define I2GE5G_CCUT(set_ch) { \
+ .cca_ccut_2g = &cca_ifem_ccut, \
+ .cca_ccut_5g = &cca_efem_ccut, \
+ .fem = RTW_RFE_IFEM2G_EFEM5G, \
+ .ifem_ext = false, \
+ .rtw_set_channel_rfe = &rtw8822b_set_channel_rfe_ ## set_ch, \
+ }
+#define IFEM_EXT_CCUT(set_ch) { \
+ .cca_ccut_2g = &cca_ifem_ccut_ext, \
+ .cca_ccut_5g = &cca_ifem_ccut_ext, \
+ .fem = RTW_RFE_IFEM, \
+ .ifem_ext = true, \
+ .rtw_set_channel_rfe = &rtw8822b_set_channel_rfe_ ## set_ch, \
+ }
+
+static const struct rtw8822b_rfe_info rtw8822b_rfe_info[] = {
+ [2] = I2GE5G_CCUT(efem),
+ [5] = IFEM_EXT_CCUT(ifem),
+};
+
+static void rtw8822b_set_channel_cca(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ const struct rtw8822b_rfe_info *rfe_info)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct cca_ccut *cca_ccut;
+ u8 col;
+ u32 reg82c, reg830, reg838;
+ bool is_efem_cca = false, is_ifem_cca = false, is_rfe_type = false;
+
+ if (channel <= 14) {
+ cca_ccut = rfe_info->cca_ccut_2g;
+
+ if (hal->antenna_rx == BB_PATH_A ||
+ hal->antenna_rx == BB_PATH_B)
+ col = CCUT_IDX_1R_2G;
+ else
+ col = CCUT_IDX_2R_2G;
+ } else {
+ cca_ccut = rfe_info->cca_ccut_5g;
+
+ if (hal->antenna_rx == BB_PATH_A ||
+ hal->antenna_rx == BB_PATH_B)
+ col = CCUT_IDX_1R_5G;
+ else
+ col = CCUT_IDX_2R_5G;
+ }
+
+ rtw8822b_get_cca_val(cca_ccut, col, &reg82c, &reg830, &reg838);
+
+ switch (rfe_info->fem) {
+ case RTW_RFE_IFEM:
+ default:
+ is_ifem_cca = true;
+ if (rfe_info->ifem_ext)
+ is_rfe_type = true;
+ break;
+ case RTW_RFE_EFEM:
+ is_efem_cca = true;
+ break;
+ case RTW_RFE_IFEM2G_EFEM5G:
+ if (channel <= 14)
+ is_ifem_cca = true;
+ else
+ is_efem_cca = true;
+ break;
+ }
+
+ if (is_ifem_cca) {
+ if ((hal->cut_version == RTW_CHIP_VER_CUT_B &&
+ (col == CCUT_IDX_2R_2G || col == CCUT_IDX_2R_5G) &&
+ bw == RTW_CHANNEL_WIDTH_40) ||
+ (!is_rfe_type && col == CCUT_IDX_2R_5G &&
+ bw == RTW_CHANNEL_WIDTH_40) ||
+ (efuse->rfe_option == 5 && col == CCUT_IDX_2R_5G))
+ reg830 = 0x79a0ea28;
+ }
+
+ rtw_write32_mask(rtwdev, REG_CCASEL, MASKDWORD, reg82c);
+ rtw_write32_mask(rtwdev, REG_PDMFTH, MASKDWORD, reg830);
+ rtw_write32_mask(rtwdev, REG_CCA2ND, MASKDWORD, reg838);
+
+ if (is_efem_cca && !(hal->cut_version == RTW_CHIP_VER_CUT_B))
+ rtw_write32_mask(rtwdev, REG_L1WT, MASKDWORD, 0x9194b2b9);
+
+ if (bw == RTW_CHANNEL_WIDTH_20 &&
+ ((channel >= 52 && channel <= 64) ||
+ (channel >= 100 && channel <= 144)))
+ rtw_write32_mask(rtwdev, REG_CCA2ND, 0xf0, 0x4);
+}
+
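+/* Values for the RF band-edge field (RFBE_MASK of RF_MALSEL), indexed by
+ * ((channel - base) >> 1) for the low (36-64), middle (100-144) and high
+ * (149-177) 5G sub-bands; see rtw8822b_set_channel_rf() below.
+ */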
+static const u8 low_band[15] = {0x7, 0x6, 0x6, 0x5, 0x0, 0x0, 0x7, 0xff, 0x6,
+ 0x5, 0x0, 0x0, 0x7, 0x6, 0x6};
+static const u8 middle_band[23] = {0x6, 0x5, 0x0, 0x0, 0x7, 0x6, 0x6, 0xff, 0x0,
+ 0x0, 0x7, 0x6, 0x6, 0x5, 0x0, 0xff, 0x7, 0x6,
+ 0x6, 0x5, 0x0, 0x0, 0x7};
+static const u8 high_band[15] = {0x5, 0x5, 0x0, 0x7, 0x7, 0x6, 0x5, 0xff, 0x0,
+ 0x7, 0x7, 0x6, 0x5, 0x5, 0x0};
+
+static void rtw8822b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
+#define RF18_BAND_2G (0)
+#define RF18_BAND_5G (BIT(16) | BIT(8))
+#define RF18_CHANNEL_MASK (MASKBYTE0)
+#define RF18_RFSI_MASK (BIT(18) | BIT(17))
+#define RF18_RFSI_GE_CH80 (BIT(17))
+#define RF18_RFSI_GT_CH144 (BIT(18))
+#define RF18_BW_MASK (BIT(11) | BIT(10))
+#define RF18_BW_20M (BIT(11) | BIT(10))
+#define RF18_BW_40M (BIT(11))
+#define RF18_BW_80M (BIT(10))
+#define RFBE_MASK (BIT(17) | BIT(16) | BIT(15))
+
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 rf_reg18, rf_reg_be;
+
+ rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
+
+ rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
+ RF18_BW_MASK);
+
+ rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (channel & RF18_CHANNEL_MASK);
+ if (channel > 144)
+ rf_reg18 |= RF18_RFSI_GT_CH144;
+ else if (channel >= 80)
+ rf_reg18 |= RF18_RFSI_GE_CH80;
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_5:
+ case RTW_CHANNEL_WIDTH_10:
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ rf_reg18 |= RF18_BW_20M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rf_reg18 |= RF18_BW_40M;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rf_reg18 |= RF18_BW_80M;
+ break;
+ }
+
+ if (channel <= 14)
+ rf_reg_be = 0x0;
+ else if (channel >= 36 && channel <= 64)
+ rf_reg_be = low_band[(channel - 36) >> 1];
+ else if (channel >= 100 && channel <= 144)
+ rf_reg_be = middle_band[(channel - 100) >> 1];
+ else if (channel >= 149 && channel <= 177)
+ rf_reg_be = high_band[(channel - 149) >> 1];
+ else
+ goto err;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_MALSEL, RFBE_MASK, rf_reg_be);
+
+	/* 0xdf[18] must be set to 1 before writing RF18 when the channel is 144 */
+ if (channel == 144)
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(18), 0x1);
+ else
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(18), 0x0);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK, rf_reg18);
+ if (hal->rf_type > RF_1T1R)
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x18, RFREG_MASK, rf_reg18);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 0);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_XTALX2, BIT(19), 1);
+
+ return;
+
+err:
+ WARN_ON(1);
+}
+
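+/* Briefly lower the initial gain index (IGI) by 2 on both paths and restore
+ * it, then clear and re-write the RX path selection byte.
+ */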
+static void rtw8822b_toggle_igi(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 igi;
+
+ igi = rtw_read32_mask(rtwdev, REG_RXIGI_A, 0x7f);
+ rtw_write32_mask(rtwdev, REG_RXIGI_A, 0x7f, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI_A, 0x7f, igi);
+ rtw_write32_mask(rtwdev, REG_RXIGI_B, 0x7f, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI_B, 0x7f, igi);
+
+ rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0, 0x0);
+ rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0,
+ hal->antenna_rx | (hal->antenna_rx << 4));
+}
+
+static void rtw8822b_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)
+{
+ if (bw == RTW_CHANNEL_WIDTH_40) {
+ /* RX DFIR for BW40 */
+ rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x1);
+ rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x0);
+ rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
+ } else if (bw == RTW_CHANNEL_WIDTH_80) {
+ /* RX DFIR for BW80 */
+ rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+ rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x1);
+ rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x0);
+ } else {
+		/* RX DFIR for BW20, BW10 and BW5 */
+ rtw_write32_mask(rtwdev, REG_ACBB0, BIT(29) | BIT(28), 0x2);
+ rtw_write32_mask(rtwdev, REG_ACBBRXFIR, BIT(29) | BIT(28), 0x2);
+ rtw_write32s_mask(rtwdev, REG_TXDFIR, BIT(31), 0x1);
+ }
+}
+
+static void rtw8822b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 rfe_option = efuse->rfe_option;
+ u32 val32;
+
+ if (channel <= 14) {
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x1);
+ rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x0);
+ rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x0);
+ rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 15);
+
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x0);
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x96a);
+ if (channel == 14) {
+ rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x00006577);
+ rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x0000);
+ } else {
+ rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, 0x384f6577);
+ rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, 0x1525);
+ }
+
+ rtw_write32_mask(rtwdev, REG_RFEINV, 0x300, 0x2);
+ } else if (channel > 35) {
+ rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1);
+ rtw_write32_mask(rtwdev, REG_CCK_CHECK, BIT(7), 0x1);
+ rtw_write32_mask(rtwdev, REG_RXPSEL, BIT(28), 0x0);
+ rtw_write32_mask(rtwdev, REG_RXCCAMSK, 0x0000FC00, 34);
+
+ if (channel >= 36 && channel <= 64)
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x1);
+ else if (channel >= 100 && channel <= 144)
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x2);
+ else if (channel >= 149)
+ rtw_write32_mask(rtwdev, REG_ACGG2TBL, 0x1f, 0x3);
+
+ if (channel >= 36 && channel <= 48)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x494);
+ else if (channel >= 52 && channel <= 64)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x453);
+ else if (channel >= 100 && channel <= 116)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x452);
+ else if (channel >= 118 && channel <= 177)
+ rtw_write32_mask(rtwdev, REG_CLKTRK, 0x1ffe0000, 0x412);
+
+ rtw_write32_mask(rtwdev, 0xcbc, 0x300, 0x1);
+ }
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xFFCFFC00;
+ val32 |= (RTW_CHANNEL_WIDTH_20);
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ if (primary_ch_idx == 1)
+ rtw_write32_set(rtwdev, REG_RXSB, BIT(4));
+ else
+ rtw_write32_clr(rtwdev, REG_RXSB, BIT(4));
+
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xFF3FF300;
+ val32 |= (((primary_ch_idx & 0xf) << 2) | RTW_CHANNEL_WIDTH_40);
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xFCEFCF00;
+ val32 |= (((primary_ch_idx & 0xf) << 2) | RTW_CHANNEL_WIDTH_80);
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x1);
+
+ if (rfe_option == 2) {
+ rtw_write32_mask(rtwdev, REG_L1PKWT, 0x0000f000, 0x6);
+ rtw_write32_mask(rtwdev, REG_ADC40, BIT(10), 0x1);
+ }
+ break;
+ case RTW_CHANNEL_WIDTH_5:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xEFEEFE00;
+ val32 |= ((BIT(6) | RTW_CHANNEL_WIDTH_20));
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
+ rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
+ break;
+ case RTW_CHANNEL_WIDTH_10:
+ val32 = rtw_read32_mask(rtwdev, REG_ADCCLK, MASKDWORD);
+ val32 &= 0xEFFEFF00;
+ val32 |= ((BIT(7) | RTW_CHANNEL_WIDTH_20));
+ rtw_write32_mask(rtwdev, REG_ADCCLK, MASKDWORD, val32);
+
+ rtw_write32_mask(rtwdev, REG_ADC160, BIT(30), 0x0);
+ rtw_write32_mask(rtwdev, REG_ADC40, BIT(31), 0x1);
+ break;
+ }
+}
+
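+/* Channel switch: program the BB, MAC and RF for the new channel/bandwidth,
+ * update the RX DFIR, toggle IGI, and apply the CCA and RFE settings for the
+ * RFE option read from efuse.
+ */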
+static void rtw8822b_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct rtw8822b_rfe_info *rfe_info;
+
+ if (WARN(efuse->rfe_option >= ARRAY_SIZE(rtw8822b_rfe_info),
+ "rfe_option %d is out of boundary\n", efuse->rfe_option))
+ return;
+
+ rfe_info = &rtw8822b_rfe_info[efuse->rfe_option];
+
+ rtw8822b_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8822b_set_channel_rf(rtwdev, channel, bw);
+ rtw8822b_set_channel_rxdfir(rtwdev, bw);
+ rtw8822b_toggle_igi(rtwdev);
+ rtw8822b_set_channel_cca(rtwdev, channel, bw, rfe_info);
+ (*rfe_info->rtw_set_channel_rfe)(rtwdev, channel);
+}
+
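+/* Configure the BB TX/RX path selection registers for the requested antenna
+ * paths, update the RF mode table through the LUT write registers, and then
+ * refresh the IGI, CCA and RFE settings for the current channel.
+ */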
+static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ const struct rtw8822b_rfe_info *rfe_info;
+ u8 ch = rtwdev->hal.current_channel;
+ u8 tx_path_sel, rx_path_sel;
+ int counter;
+
+ if (WARN(efuse->rfe_option >= ARRAY_SIZE(rtw8822b_rfe_info),
+ "rfe_option %d is out of boundary\n", efuse->rfe_option))
+ return;
+
+ rfe_info = &rtw8822b_rfe_info[efuse->rfe_option];
+
+ if ((tx_path | rx_path) & BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_AGCTR_A, MASKLWORD, 0x3231);
+ else
+ rtw_write32_mask(rtwdev, REG_AGCTR_A, MASKLWORD, 0x1111);
+
+ if ((tx_path | rx_path) & BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_AGCTR_B, MASKLWORD, 0x3231);
+ else
+ rtw_write32_mask(rtwdev, REG_AGCTR_B, MASKLWORD, 0x1111);
+
+ rtw_write32_mask(rtwdev, REG_CDDTXP, (BIT(19) | BIT(18)), 0x3);
+ rtw_write32_mask(rtwdev, REG_TXPSEL, (BIT(29) | BIT(28)), 0x1);
+ rtw_write32_mask(rtwdev, REG_TXPSEL, BIT(30), 0x1);
+
+ if (tx_path & BB_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x001);
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0x8);
+ } else if (tx_path & BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x002);
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0x4);
+ }
+
+ if (tx_path == BB_PATH_A || tx_path == BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_TXPSEL1, 0xfff0, 0x01);
+ else
+ rtw_write32_mask(rtwdev, REG_TXPSEL1, 0xfff0, 0x43);
+
+ tx_path_sel = (tx_path << 4) | tx_path;
+ rtw_write32_mask(rtwdev, REG_TXPSEL, MASKBYTE0, tx_path_sel);
+
+ if (tx_path != BB_PATH_A && tx_path != BB_PATH_B) {
+ if (is_tx2_path || rtwdev->mp_mode) {
+ rtw_write32_mask(rtwdev, REG_CDDTXP, 0xfff00000, 0x043);
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0xf0000000, 0xc);
+ }
+ }
+
+ rtw_write32_mask(rtwdev, REG_RXDESC, BIT(22), 0x0);
+ rtw_write32_mask(rtwdev, REG_RXDESC, BIT(18), 0x0);
+
+ if (rx_path & BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0x0f000000, 0x0);
+ else if (rx_path & BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_ADCINI, 0x0f000000, 0x5);
+
+ rx_path_sel = (rx_path << 4) | rx_path;
+ rtw_write32_mask(rtwdev, REG_RXPSEL, MASKBYTE0, rx_path_sel);
+
+ if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_ANTWT, BIT(16), 0x0);
+ rtw_write32_mask(rtwdev, REG_HTSTFWT, BIT(28), 0x0);
+ rtw_write32_mask(rtwdev, REG_MRC, BIT(23), 0x0);
+ } else {
+ rtw_write32_mask(rtwdev, REG_ANTWT, BIT(16), 0x1);
+ rtw_write32_mask(rtwdev, REG_HTSTFWT, BIT(28), 0x1);
+ rtw_write32_mask(rtwdev, REG_MRC, BIT(23), 0x1);
+ }
+
+ for (counter = 100; counter > 0; counter--) {
+ u32 rf_reg33;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00001);
+
+ udelay(2);
+ rf_reg33 = rtw_read_rf(rtwdev, RF_PATH_A, 0x33, RFREG_MASK);
+
+ if (rf_reg33 == 0x00001)
+ break;
+ }
+
+ if (WARN(counter <= 0, "write RF mode table fail\n"))
+ return;
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x80000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, RFREG_MASK, 0x00001);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD1, RFREG_MASK, 0x00034);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, RFREG_MASK, 0x4080c);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, RFREG_MASK, 0x00000);
+
+ rtw8822b_toggle_igi(rtwdev);
+ rtw8822b_set_channel_cca(rtwdev, 1, RTW_CHANNEL_WIDTH_20, rfe_info);
+ (*rfe_info->rtw_set_channel_rfe)(rtwdev, ch);
+}
+
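+/* RX PHY status parsers. The report format (page) is selected by the low
+ * nibble of the first PHY status byte; see query_phy_status() below.
+ */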
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ s8 min_rx_power = -120;
+ u8 pwdb = GET_PHY_STAT_P0_PWDB(phy_status);
+
+ pkt_stat->rx_power[RF_PATH_A] = pwdb - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+}
+
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 rxsc, bw;
+ s8 min_rx_power = -120;
+
+ if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+ rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+ if (rxsc >= 1 && rxsc <= 8)
+ bw = RTW_CHANNEL_WIDTH_20;
+ else if (rxsc >= 9 && rxsc <= 12)
+ bw = RTW_CHANNEL_WIDTH_40;
+ else if (rxsc >= 13)
+ bw = RTW_CHANNEL_WIDTH_80;
+ else
+ bw = GET_PHY_STAT_P1_RF_MODE(phy_status);
+
+ pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+ pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
+ pkt_stat->bw = bw;
+ pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
+ pkt_stat->rx_power[RF_PATH_B],
+ min_rx_power);
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 page;
+
+ page = *phy_status & 0xf;
+
+ switch (page) {
+ case 0:
+ query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+ break;
+ case 1:
+ query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+ break;
+ default:
+		rtw_warn(rtwdev, "unhandled phy status page (%d)\n", page);
+ return;
+ }
+}
+
+static void rtw8822b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+	/* drv_info_sz is reported in units of 8 bytes */
+ pkt_stat->drv_info_sz *= 8;
+
+	/* we are not interested in the rx/phy status of C2H command packets */
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
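+/* Pack the per-rate TX power indexes four at a time into the 32-bit TX AGC
+ * registers at offset 0x1d00 (path A) / 0x1d80 (path B).
+ */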
+static void
+rtw8822b_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ static const u32 offset_txagc[2] = {0x1d00, 0x1d80};
+ static u32 phy_pwr_idx;
+ u8 rate, rate_idx, pwr_index, shift;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_index = hal->tx_pwr_tbl[path][rate];
+ shift = rate & 0x3;
+ phy_pwr_idx |= ((u32)pwr_index << (shift * 8));
+ if (shift == 0x3) {
+ rate_idx = rate & 0xfc;
+ rtw_write32(rtwdev, offset_txagc[path] + rate_idx,
+ phy_pwr_idx);
+ phy_pwr_idx = 0;
+ }
+ }
+}
+
+static void rtw8822b_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int rs, path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ rtw8822b_set_tx_power_index_by_rate(rtwdev, path, rs);
+ }
+}
+
+static bool rtw8822b_check_rf_path(u8 antenna)
+{
+ switch (antenna) {
+ case BB_PATH_A:
+ case BB_PATH_B:
+ case BB_PATH_AB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void rtw8822b_set_antenna(struct rtw_dev *rtwdev, u8 antenna_tx,
+ u8 antenna_rx)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "config RF path, tx=0x%x rx=0x%x\n",
+ antenna_tx, antenna_rx);
+
+ if (!rtw8822b_check_rf_path(antenna_tx)) {
+		rtw_info(rtwdev, "unsupported tx path, set to default path ab\n");
+ antenna_tx = BB_PATH_AB;
+ }
+ if (!rtw8822b_check_rf_path(antenna_rx)) {
+		rtw_info(rtwdev, "unsupported rx path, set to default path ab\n");
+ antenna_rx = BB_PATH_AB;
+ }
+ hal->antenna_tx = antenna_tx;
+ hal->antenna_rx = antenna_rx;
+ rtw8822b_config_trx_mode(rtwdev, antenna_tx, antenna_rx, false);
+}
+
+static void rtw8822b_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+ ldo_pwr = enable ? ldo_pwr | BIT(7) : ldo_pwr & ~BIT(7);
+ rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
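+/* Collect the CCK/OFDM false-alarm counters (CCK is only counted while CCK RX
+ * is enabled), then toggle the related PHY counter control bits.
+ */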
+static void rtw8822b_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_enable;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+
+ cck_enable = rtw_read32(rtwdev, 0x808) & BIT(28);
+ cck_fa_cnt = rtw_read16(rtwdev, 0xa5c);
+ ofdm_fa_cnt = rtw_read16(rtwdev, 0xf48);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+
+ rtw_write32_set(rtwdev, 0x9a4, BIT(17));
+ rtw_write32_clr(rtwdev, 0x9a4, BIT(17));
+ rtw_write32_clr(rtwdev, 0xa2c, BIT(15));
+ rtw_write32_set(rtwdev, 0xa2c, BIT(15));
+ rtw_write32_set(rtwdev, 0xb58, BIT(0));
+ rtw_write32_clr(rtwdev, 0xb58, BIT(0));
+}
+
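+/* Trigger IQ calibration through the firmware, poll RF_DTXLOK for the 0xabcde
+ * completion value (up to 300 * 20ms), then read back the reload flag and the
+ * IQK failure mask for debugging.
+ */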
+static void rtw8822b_do_iqk(struct rtw_dev *rtwdev)
+{
+ static int do_iqk_cnt;
+ struct rtw_iqk_para para = {.clear = 0, .segment_iqk = 0};
+ u32 rf_reg, iqk_fail_mask;
+ int counter;
+ bool reload;
+
+ rtw_fw_do_iqk(rtwdev, &para);
+
+ for (counter = 0; counter < 300; counter++) {
+ rf_reg = rtw_read_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK);
+ if (rf_reg == 0xabcde)
+ break;
+ msleep(20);
+ }
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_DTXLOK, RFREG_MASK, 0x0);
+
+ reload = !!rtw_read32_mask(rtwdev, REG_IQKFAILMSK, BIT(16));
+	iqk_fail_mask = rtw_read32_mask(rtwdev, REG_IQKFAILMSK, GENMASK(7, 0));
+ rtw_dbg(rtwdev, RTW_DBG_PHY,
+ "iqk counter=%d reload=%d do_iqk_cnt=%d n_iqk_fail(mask)=0x%02x\n",
+ counter, reload, ++do_iqk_cnt, iqk_fail_mask);
+}
+
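+/* Power on/off sequences. Each rtw_pwr_seq_cmd is {offset, cut mask, interface
+ * mask, address space, command, mask, value}; every sequence is terminated by
+ * an RTW_PWR_CMD_END entry.
+ */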
+static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
+ {0x0300,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822b[] = {
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0012,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0001,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x10C3,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x10A8,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x10A9,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xef},
+ {0x10AA,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x0c},
+ {0x0068,
+ RTW_PWR_CUT_C_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0029,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0xF9},
+ {0x0024,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0074,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x00AF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822b[] = {
+ {0x0003,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x00EF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x30},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x10C3,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822b[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x004F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), 0},
+ {0x0046,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0090,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0044,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x0040,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x90},
+ {0x0041,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0042,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x04},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd *card_enable_flow_8822b[] = {
+ trans_carddis_to_cardemu_8822b,
+ trans_cardemu_to_act_8822b,
+ NULL
+};
+
+static struct rtw_pwr_seq_cmd *card_disable_flow_8822b[] = {
+ trans_act_to_cardemu_8822b,
+ trans_cardemu_to_carddis_8822b,
+ NULL
+};
+
+static struct rtw_intf_phy_para usb2_param_8822b[] = {
+ {0xFFFF, 0x00,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para usb3_param_8822b[] = {
+ {0x0001, 0xA841,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_D,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen1_param_8822b[] = {
+ {0x0001, 0xA841,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0002, 0x60C6,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0008, 0x3596,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0009, 0x321C,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x000A, 0x9623,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0020, 0x94FF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0021, 0xFFCF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0026, 0xC006,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0029, 0xFF0E,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x002A, 0x1840,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen2_param_8822b[] = {
+ {0x0001, 0xA841,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0002, 0x60C6,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0008, 0x3597,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0009, 0x321C,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x000A, 0x9623,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0020, 0x94FF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0021, 0xFFCF,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0026, 0xC006,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x0029, 0xFF0E,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0x002A, 0x3040,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_C,
+ RTW_INTF_PHY_PLATFORM_ALL},
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para_table phy_para_table_8822b = {
+ .usb2_para = usb2_param_8822b,
+ .usb3_para = usb3_param_8822b,
+ .gen1_para = pcie_gen1_param_8822b,
+ .gen2_para = pcie_gen2_param_8822b,
+ .n_usb2_para = ARRAY_SIZE(usb2_param_8822b),
+	.n_usb3_para = ARRAY_SIZE(usb3_param_8822b),
+ .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8822b),
+ .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822b),
+};
+
+static const struct rtw_rfe_def rtw8822b_rfe_defs[] = {
+ [2] = RTW_DEF_RFE(8822b, 2, 2),
+ [5] = RTW_DEF_RFE(8822b, 5, 5),
+};
+
+static struct rtw_hw_reg rtw8822b_dig[] = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xe50, .mask = 0x7f },
+};
+
+static struct rtw_page_table page_table_8822b[] = {
+ {64, 64, 64, 64, 1},
+ {64, 64, 64, 64, 1},
+ {64, 64, 0, 0, 1},
+ {64, 64, 64, 0, 1},
+ {64, 64, 64, 64, 1},
+};
+
+static struct rtw_rqpn rqpn_table_8822b[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+static struct rtw_chip_ops rtw8822b_ops = {
+ .phy_set_param = rtw8822b_phy_set_param,
+ .read_efuse = rtw8822b_read_efuse,
+ .query_rx_desc = rtw8822b_query_rx_desc,
+ .set_channel = rtw8822b_set_channel,
+ .mac_init = rtw8822b_mac_init,
+ .read_rf = rtw_phy_read_rf,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8822b_set_tx_power_index,
+ .set_antenna = rtw8822b_set_antenna,
+ .cfg_ldo25 = rtw8822b_cfg_ldo25,
+ .false_alarm_statistics = rtw8822b_false_alarm_statistics,
+ .do_iqk = rtw8822b_do_iqk,
+};
+
+struct rtw_chip_info rtw8822b_hw_spec = {
+ .ops = &rtw8822b_ops,
+ .id = RTW_CHIP_TYPE_8822B,
+ .fw_name = "rtw88/rtw8822b_fw.bin",
+ .tx_pkt_desc_sz = 48,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 1024,
+ .log_efuse_size = 768,
+ .ptct_efuse_size = 96,
+ .txff_size = 262144,
+ .rxff_size = 24576,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .max_power_index = 0x3f,
+ .csi_buf_pg_num = 0,
+ .band = RTW_BAND_2G | RTW_BAND_5G,
+ .page_size = 128,
+ .dig_min = 0x1c,
+ .ht_supported = true,
+ .vht_supported = true,
+ .sys_func_en = 0xDC,
+ .pwr_on_seq = card_enable_flow_8822b,
+ .pwr_off_seq = card_disable_flow_8822b,
+ .page_table = page_table_8822b,
+ .rqpn_table = rqpn_table_8822b,
+ .intf_table = &phy_para_table_8822b,
+ .dig = rtw8822b_dig,
+ .rf_base_addr = {0x2800, 0x2c00},
+ .rf_sipi_addr = {0xc90, 0xe90},
+ .mac_tbl = &rtw8822b_mac_tbl,
+ .agc_tbl = &rtw8822b_agc_tbl,
+ .bb_tbl = &rtw8822b_bb_tbl,
+ .rf_tbl = {&rtw8822b_rf_a_tbl, &rtw8822b_rf_b_tbl},
+ .rfe_defs = rtw8822b_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8822b_rfe_defs),
+};
+EXPORT_SYMBOL(rtw8822b_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8822b_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
new file mode 100644
index 000000000000..0cb93d7d4cfd
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822B_H__
+#define __RTW8822B_H__
+
+#include <asm/byteorder.h>
+
+#define RCR_VHT_ACK BIT(26)
+
+struct rtw8822bu_efuse {
+ u8 res4[4]; /* 0xd0 */
+ u8 usb_optional_function;
+ u8 res5[0x1e];
+ u8 res6[2];
+ u8 serial[0x0b]; /* 0xf5 */
+ u8 vid; /* 0x100 */
+ u8 res7;
+ u8 pid;
+ u8 res8[4];
+ u8 mac_addr[ETH_ALEN]; /* 0x107 */
+ u8 res9[2];
+ u8 vendor_name[0x07];
+ u8 res10[2];
+ u8 device_name[0x14];
+ u8 res11[0xcf];
+ u8 package_type; /* 0x1fb */
+ u8 res12[0x4];
+};
+
+struct rtw8822be_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vender_id[2];
+ u8 device_id[2];
+ u8 sub_vender_id[2];
+ u8 sub_device_id[2];
+ u8 pmc[2];
+ u8 exp_device_cap[2];
+ u8 msi_cap;
+ u8 ltr_cap; /* 0xe3 */
+ u8 exp_link_control[2];
+ u8 link_cap[4];
+ u8 link_control[2];
+ u8 serial_number[8];
+ u8 res0:2; /* 0xf4 */
+ u8 ltr_en:1;
+ u8 res1:2;
+ u8 obff:2;
+ u8 res2:3;
+ u8 obff_cap:2;
+ u8 res3:4;
+ u8 res4[3];
+ u8 class_code[3];
+ u8 pci_pm_L1_2_supp:1;
+ u8 pci_pm_L1_1_supp:1;
+ u8 aspm_pm_L1_2_supp:1;
+ u8 aspm_pm_L1_1_supp:1;
+ u8 L1_pm_substates_supp:1;
+ u8 res5:3;
+ u8 port_common_mode_restore_time;
+ u8 port_t_power_on_scale:2;
+ u8 res6:1;
+ u8 port_t_power_on_value:5;
+ u8 res7;
+};
+
+struct rtw8822b_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 tx_bb_swing_setting_5g;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res[3];
+ union {
+ struct rtw8822bu_efuse u;
+ struct rtw8822be_efuse e;
+ };
+};
+
+static inline void
+_rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
+{
+ /* 0xC00-0xCFF and 0xE00-0xEFF have the same layout */
+ rtw_write32_mask(rtwdev, addr, mask, data);
+ rtw_write32_mask(rtwdev, addr + 0x200, mask, data);
+}
+
+#define rtw_write32s_mask(rtwdev, addr, mask, data) \
+ do { \
+ BUILD_BUG_ON((addr) < 0xC00 || (addr) >= 0xD00); \
+ \
+ _rtw_write32s_mask(rtwdev, addr, mask, data); \
+ } while (0)
+
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_RF_MODE(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x03), GENMASK(29, 28))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+
+#define REG_HTSTFWT 0x800
+#define REG_RXPSEL 0x808
+#define BIT_RX_PSEL_RST (BIT(28) | BIT(29))
+#define REG_TXPSEL 0x80c
+#define REG_RXCCAMSK 0x814
+#define REG_CCASEL 0x82c
+#define REG_PDMFTH 0x830
+#define REG_CCA2ND 0x838
+#define REG_L1WT 0x83c
+#define REG_L1PKWT 0x840
+#define REG_MRC 0x850
+#define REG_CLKTRK 0x860
+#define REG_ADCCLK 0x8ac
+#define REG_ADC160 0x8c4
+#define REG_ADC40 0x8c8
+#define REG_CDDTXP 0x93c
+#define REG_TXPSEL1 0x940
+#define REG_ACBB0 0x948
+#define REG_ACBBRXFIR 0x94c
+#define REG_ACGG2TBL 0x958
+#define REG_RXSB 0xa00
+#define REG_ADCINI 0xa04
+#define REG_TXSF2 0xa24
+#define REG_TXSF6 0xa28
+#define REG_RXDESC 0xa2c
+#define REG_ENTXCCK 0xa80
+#define REG_AGCTR_A 0xc08
+#define REG_TXDFIR 0xc20
+#define REG_RXIGI_A 0xc50
+#define REG_TRSW 0xca0
+#define REG_RFESEL0 0xcb0
+#define REG_RFESEL8 0xcb4
+#define REG_RFECTL 0xcb8
+#define REG_RFEINV 0xcbc
+#define REG_AGCTR_B 0xe08
+#define REG_RXIGI_B 0xe50
+#define REG_ANTWT 0x1904
+#define REG_IQKFAILMSK 0x1bf0
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
new file mode 100644
index 000000000000..2d2dfb495ce1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.c
@@ -0,0 +1,20783 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8822b_table.h"
+
+static const u32 rtw8822b_mac[] = {
+ 0x029, 0x000000F9,
+ 0x420, 0x00000080,
+ 0x421, 0x0000001F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000007,
+ 0x43F, 0x00000008,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x000000F0,
+ 0x446, 0x00000001,
+ 0x447, 0x000000FE,
+ 0x448, 0x00000000,
+ 0x449, 0x00000000,
+ 0x44A, 0x00000000,
+ 0x44B, 0x00000040,
+ 0x44C, 0x00000010,
+ 0x44D, 0x000000F0,
+ 0x44E, 0x0000003F,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x00000000,
+ 0x452, 0x00000000,
+ 0x453, 0x00000040,
+ 0x455, 0x00000070,
+ 0x45E, 0x00000004,
+ 0x49C, 0x00000010,
+ 0x49D, 0x000000F0,
+ 0x49E, 0x00000000,
+ 0x49F, 0x00000006,
+ 0x4A0, 0x000000E0,
+ 0x4A1, 0x00000003,
+ 0x4A2, 0x00000000,
+ 0x4A3, 0x00000040,
+ 0x4A4, 0x00000015,
+ 0x4A5, 0x000000F0,
+ 0x4A6, 0x00000000,
+ 0x4A7, 0x00000006,
+ 0x4A8, 0x000000E0,
+ 0x4A9, 0x00000000,
+ 0x4AA, 0x00000000,
+ 0x4AB, 0x00000000,
+ 0x7DA, 0x00000008,
+ 0x1448, 0x00000006,
+ 0x144A, 0x00000006,
+ 0x144C, 0x00000006,
+ 0x144E, 0x00000006,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CA, 0x00000020,
+ 0x4CB, 0x00000020,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x4CF, 0x00000008,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x521, 0x0000002F,
+ 0x525, 0x0000004F,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000050,
+ 0x55D, 0x000000FF,
+ 0x577, 0x0000000B,
+ 0x5BE, 0x00000064,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x00000022,
+ 0x60C, 0x00000018,
+ 0x6A0, 0x000000FF,
+ 0x6A1, 0x000000FF,
+ 0x6A2, 0x000000FF,
+ 0x6A3, 0x000000FF,
+ 0x6A4, 0x000000FF,
+ 0x6A5, 0x000000FF,
+ 0x6DE, 0x00000084,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000050,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66E, 0x00000005,
+ 0x718, 0x00000040,
+ 0x7D4, 0x00000098,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822b_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8822b_agc[] = {
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xDC000003,
+ 0x81C, 0xDB020003,
+ 0x81C, 0xDA040003,
+ 0x81C, 0xD9060003,
+ 0x81C, 0xD8080003,
+ 0x81C, 0xD70A0003,
+ 0x81C, 0xD60C0003,
+ 0x81C, 0xD50E0003,
+ 0x81C, 0xD4100003,
+ 0x81C, 0xD3120003,
+ 0x81C, 0xD2140003,
+ 0x81C, 0xD1160003,
+ 0x81C, 0xD0180003,
+ 0x81C, 0xB41A0003,
+ 0x81C, 0xB31C0003,
+ 0x81C, 0xB21E0003,
+ 0x81C, 0xB1200003,
+ 0x81C, 0xB0220003,
+ 0x81C, 0xAF240003,
+ 0x81C, 0xAE260003,
+ 0x81C, 0xAD280003,
+ 0x81C, 0xAC2A0003,
+ 0x81C, 0xAB2C0003,
+ 0x81C, 0x8C2E0003,
+ 0x81C, 0x8B300003,
+ 0x81C, 0x8A320003,
+ 0x81C, 0x89340003,
+ 0x81C, 0x88360003,
+ 0x81C, 0x87380003,
+ 0x81C, 0x863A0003,
+ 0x81C, 0x853C0003,
+ 0x81C, 0x693E0003,
+ 0x81C, 0x68400003,
+ 0x81C, 0x67420003,
+ 0x81C, 0x66440003,
+ 0x81C, 0x65460003,
+ 0x81C, 0x48480003,
+ 0x81C, 0x474A0003,
+ 0x81C, 0x464C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x27540003,
+ 0x81C, 0x26560003,
+ 0x81C, 0x25580003,
+ 0x81C, 0x245A0003,
+ 0x81C, 0x235C0003,
+ 0x81C, 0x045E0003,
+ 0x81C, 0x03600003,
+ 0x81C, 0x02620003,
+ 0x81C, 0x01640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xF5000003,
+ 0x81C, 0xF4020003,
+ 0x81C, 0xF3040003,
+ 0x81C, 0xF2060003,
+ 0x81C, 0xF1080003,
+ 0x81C, 0xF00A0003,
+ 0x81C, 0xEF0C0003,
+ 0x81C, 0xEE0E0003,
+ 0x81C, 0xED100003,
+ 0x81C, 0xEC120003,
+ 0x81C, 0xEB140003,
+ 0x81C, 0xEA160003,
+ 0x81C, 0xE9180003,
+ 0x81C, 0xE81A0003,
+ 0x81C, 0xE71C0003,
+ 0x81C, 0xE61E0003,
+ 0x81C, 0xE5200003,
+ 0x81C, 0xE4220003,
+ 0x81C, 0xE3240003,
+ 0x81C, 0xE2260003,
+ 0x81C, 0xE1280003,
+ 0x81C, 0xE02A0003,
+ 0x81C, 0xC32C0003,
+ 0x81C, 0xC22E0003,
+ 0x81C, 0xC1300003,
+ 0x81C, 0xC0320003,
+ 0x81C, 0xA4340003,
+ 0x81C, 0xA3360003,
+ 0x81C, 0xA2380003,
+ 0x81C, 0xA13A0003,
+ 0x81C, 0xA03C0003,
+ 0x81C, 0x823E0003,
+ 0x81C, 0x81400003,
+ 0x81C, 0x80420003,
+ 0x81C, 0x64440003,
+ 0x81C, 0x63460003,
+ 0x81C, 0x62480003,
+ 0x81C, 0x614A0003,
+ 0x81C, 0x604C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x42540003,
+ 0x81C, 0x41560003,
+ 0x81C, 0x40580003,
+ 0x81C, 0x055A0003,
+ 0x81C, 0x045C0003,
+ 0x81C, 0x035E0003,
+ 0x81C, 0x02600003,
+ 0x81C, 0x01620003,
+ 0x81C, 0x00640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFC000003,
+ 0x81C, 0xFB020003,
+ 0x81C, 0xFA040003,
+ 0x81C, 0xF9060003,
+ 0x81C, 0xF8080003,
+ 0x81C, 0xF70A0003,
+ 0x81C, 0xF60C0003,
+ 0x81C, 0xF50E0003,
+ 0x81C, 0xF4100003,
+ 0x81C, 0xF3120003,
+ 0x81C, 0xF2140003,
+ 0x81C, 0xF1160003,
+ 0x81C, 0xF0180003,
+ 0x81C, 0xEF1A0003,
+ 0x81C, 0xEE1C0003,
+ 0x81C, 0xED1E0003,
+ 0x81C, 0xEC200003,
+ 0x81C, 0xEB220003,
+ 0x81C, 0xEA240003,
+ 0x81C, 0xE9260003,
+ 0x81C, 0xE8280003,
+ 0x81C, 0xE72A0003,
+ 0x81C, 0xE62C0003,
+ 0x81C, 0xE52E0003,
+ 0x81C, 0xC8300003,
+ 0x81C, 0xC7320003,
+ 0x81C, 0xC6340003,
+ 0x81C, 0xC5360003,
+ 0x81C, 0xC4380003,
+ 0x81C, 0xC33A0003,
+ 0x81C, 0xC23C0003,
+ 0x81C, 0xC13E0003,
+ 0x81C, 0xA4400003,
+ 0x81C, 0xA3420003,
+ 0x81C, 0xA2440003,
+ 0x81C, 0xA1460003,
+ 0x81C, 0xA0480003,
+ 0x81C, 0x684A0003,
+ 0x81C, 0x674C0003,
+ 0x81C, 0x664E0003,
+ 0x81C, 0x65500003,
+ 0x81C, 0x64520003,
+ 0x81C, 0x63540003,
+ 0x81C, 0x44560003,
+ 0x81C, 0x43580003,
+ 0x81C, 0x425A0003,
+ 0x81C, 0x415C0003,
+ 0x81C, 0x405E0003,
+ 0x81C, 0x23600003,
+ 0x81C, 0x22620003,
+ 0x81C, 0x21640003,
+ 0x81C, 0x03660003,
+ 0x81C, 0x02680003,
+ 0x81C, 0x016A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFD000003,
+ 0x81C, 0xFC020003,
+ 0x81C, 0xFB040003,
+ 0x81C, 0xFA060003,
+ 0x81C, 0xF9080003,
+ 0x81C, 0xF80A0003,
+ 0x81C, 0xF70C0003,
+ 0x81C, 0xF60E0003,
+ 0x81C, 0xF5100003,
+ 0x81C, 0xF4120003,
+ 0x81C, 0xF3140003,
+ 0x81C, 0xF2160003,
+ 0x81C, 0xF1180003,
+ 0x81C, 0xF01A0003,
+ 0x81C, 0xEF1C0003,
+ 0x81C, 0xEE1E0003,
+ 0x81C, 0xED200003,
+ 0x81C, 0xEC220003,
+ 0x81C, 0xEB240003,
+ 0x81C, 0xEA260003,
+ 0x81C, 0xE9280003,
+ 0x81C, 0xE82A0003,
+ 0x81C, 0xE72C0003,
+ 0x81C, 0xE62E0003,
+ 0x81C, 0xE5300003,
+ 0x81C, 0xC8320003,
+ 0x81C, 0xC7340003,
+ 0x81C, 0xC6360003,
+ 0x81C, 0xC5380003,
+ 0x81C, 0xC43A0003,
+ 0x81C, 0xC33C0003,
+ 0x81C, 0xC23E0003,
+ 0x81C, 0xC1400003,
+ 0x81C, 0xC0420003,
+ 0x81C, 0xA5440003,
+ 0x81C, 0xA4460003,
+ 0x81C, 0xA3480003,
+ 0x81C, 0xA24A0003,
+ 0x81C, 0xA14C0003,
+ 0x81C, 0x834E0003,
+ 0x81C, 0x82500003,
+ 0x81C, 0x81520003,
+ 0x81C, 0x80540003,
+ 0x81C, 0x65560003,
+ 0x81C, 0x64580003,
+ 0x81C, 0x635A0003,
+ 0x81C, 0x625C0003,
+ 0x81C, 0x435E0003,
+ 0x81C, 0x42600003,
+ 0x81C, 0x41620003,
+ 0x81C, 0x40640003,
+ 0x81C, 0x06660003,
+ 0x81C, 0x05680003,
+ 0x81C, 0x046A0003,
+ 0x81C, 0x036C0003,
+ 0x81C, 0x026E0003,
+ 0x81C, 0x01700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xDC000003,
+ 0x81C, 0xDB020003,
+ 0x81C, 0xDA040003,
+ 0x81C, 0xD9060003,
+ 0x81C, 0xD8080003,
+ 0x81C, 0xD70A0003,
+ 0x81C, 0xD60C0003,
+ 0x81C, 0xD50E0003,
+ 0x81C, 0xD4100003,
+ 0x81C, 0xD3120003,
+ 0x81C, 0xD2140003,
+ 0x81C, 0xD1160003,
+ 0x81C, 0xD0180003,
+ 0x81C, 0xB41A0003,
+ 0x81C, 0xB31C0003,
+ 0x81C, 0xB21E0003,
+ 0x81C, 0xB1200003,
+ 0x81C, 0xB0220003,
+ 0x81C, 0xAF240003,
+ 0x81C, 0xAE260003,
+ 0x81C, 0xAD280003,
+ 0x81C, 0xAC2A0003,
+ 0x81C, 0xAB2C0003,
+ 0x81C, 0x8C2E0003,
+ 0x81C, 0x8B300003,
+ 0x81C, 0x8A320003,
+ 0x81C, 0x89340003,
+ 0x81C, 0x88360003,
+ 0x81C, 0x87380003,
+ 0x81C, 0x863A0003,
+ 0x81C, 0x853C0003,
+ 0x81C, 0x693E0003,
+ 0x81C, 0x68400003,
+ 0x81C, 0x67420003,
+ 0x81C, 0x66440003,
+ 0x81C, 0x65460003,
+ 0x81C, 0x48480003,
+ 0x81C, 0x474A0003,
+ 0x81C, 0x464C0003,
+ 0x81C, 0x454E0003,
+ 0x81C, 0x44500003,
+ 0x81C, 0x43520003,
+ 0x81C, 0x27540003,
+ 0x81C, 0x26560003,
+ 0x81C, 0x25580003,
+ 0x81C, 0x245A0003,
+ 0x81C, 0x235C0003,
+ 0x81C, 0x045E0003,
+ 0x81C, 0x03600003,
+ 0x81C, 0x02620003,
+ 0x81C, 0x01640003,
+ 0x81C, 0x00660003,
+ 0x81C, 0x00680003,
+ 0x81C, 0x006A0003,
+ 0x81C, 0x006C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000003,
+ 0x81C, 0xFE000003,
+ 0x81C, 0xFD020003,
+ 0x81C, 0xFC040003,
+ 0x81C, 0xFB060003,
+ 0x81C, 0xFA080003,
+ 0x81C, 0xF90A0003,
+ 0x81C, 0xF80C0003,
+ 0x81C, 0xF70E0003,
+ 0x81C, 0xF6100003,
+ 0x81C, 0xF5120003,
+ 0x81C, 0xF4140003,
+ 0x81C, 0xF3160003,
+ 0x81C, 0xF2180003,
+ 0x81C, 0xF11A0003,
+ 0x81C, 0xF01C0003,
+ 0x81C, 0xEF1E0003,
+ 0x81C, 0xEE200003,
+ 0x81C, 0xED220003,
+ 0x81C, 0xEC240003,
+ 0x81C, 0xEB260003,
+ 0x81C, 0xEA280003,
+ 0x81C, 0xE92A0003,
+ 0x81C, 0xE82C0003,
+ 0x81C, 0xE72E0003,
+ 0x81C, 0xE6300003,
+ 0x81C, 0xE5320003,
+ 0x81C, 0xC8340003,
+ 0x81C, 0xC7360003,
+ 0x81C, 0xC6380003,
+ 0x81C, 0xC53A0003,
+ 0x81C, 0xC43C0003,
+ 0x81C, 0xC33E0003,
+ 0x81C, 0xC2400003,
+ 0x81C, 0xC1420003,
+ 0x81C, 0xC0440003,
+ 0x81C, 0xA3460003,
+ 0x81C, 0xA2480003,
+ 0x81C, 0xA14A0003,
+ 0x81C, 0xA04C0003,
+ 0x81C, 0x824E0003,
+ 0x81C, 0x81500003,
+ 0x81C, 0x80520003,
+ 0x81C, 0x64540003,
+ 0x81C, 0x63560003,
+ 0x81C, 0x62580003,
+ 0x81C, 0x445A0003,
+ 0x81C, 0x435C0003,
+ 0x81C, 0x425E0003,
+ 0x81C, 0x41600003,
+ 0x81C, 0x40620003,
+ 0x81C, 0x05640003,
+ 0x81C, 0x04660003,
+ 0x81C, 0x03680003,
+ 0x81C, 0x026A0003,
+ 0x81C, 0x016C0003,
+ 0x81C, 0x006E0003,
+ 0x81C, 0x00700003,
+ 0x81C, 0x00720003,
+ 0x81C, 0x00740003,
+ 0x81C, 0x00760003,
+ 0x81C, 0x00780003,
+ 0x81C, 0x007A0003,
+ 0x81C, 0x007C0003,
+ 0x81C, 0x007E0003,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x06500103,
+ 0x81C, 0x05520103,
+ 0x81C, 0x04540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEF1C0103,
+ 0x81C, 0xEE1E0103,
+ 0x81C, 0xED200103,
+ 0x81C, 0xEC220103,
+ 0x81C, 0xEB240103,
+ 0x81C, 0xEA260103,
+ 0x81C, 0xE9280103,
+ 0x81C, 0xE82A0103,
+ 0x81C, 0xE72C0103,
+ 0x81C, 0xE62E0103,
+ 0x81C, 0xE5300103,
+ 0x81C, 0xE4320103,
+ 0x81C, 0xE3340103,
+ 0x81C, 0xE2360103,
+ 0x81C, 0xC5380103,
+ 0x81C, 0xC43A0103,
+ 0x81C, 0xC33C0103,
+ 0x81C, 0xC23E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x64500103,
+ 0x81C, 0x63520103,
+ 0x81C, 0x62540103,
+ 0x81C, 0x61560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x065E0103,
+ 0x81C, 0x05600103,
+ 0x81C, 0x04620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000103,
+ 0x81C, 0xF9020103,
+ 0x81C, 0xF8040103,
+ 0x81C, 0xF7060103,
+ 0x81C, 0xF6080103,
+ 0x81C, 0xF50A0103,
+ 0x81C, 0xF40C0103,
+ 0x81C, 0xF30E0103,
+ 0x81C, 0xF2100103,
+ 0x81C, 0xF1120103,
+ 0x81C, 0xF0140103,
+ 0x81C, 0xEF160103,
+ 0x81C, 0xEE180103,
+ 0x81C, 0xED1A0103,
+ 0x81C, 0xEC1C0103,
+ 0x81C, 0xEB1E0103,
+ 0x81C, 0xEA200103,
+ 0x81C, 0xE9220103,
+ 0x81C, 0xE8240103,
+ 0x81C, 0xE7260103,
+ 0x81C, 0xE6280103,
+ 0x81C, 0xE52A0103,
+ 0x81C, 0xC42C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0x833C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x81400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x434A0103,
+ 0x81C, 0x424C0103,
+ 0x81C, 0x414E0103,
+ 0x81C, 0x40500103,
+ 0x81C, 0x22520103,
+ 0x81C, 0x21540103,
+ 0x81C, 0x20560103,
+ 0x81C, 0x04580103,
+ 0x81C, 0x035A0103,
+ 0x81C, 0x025C0103,
+ 0x81C, 0x015E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEF1C0103,
+ 0x81C, 0xEE1E0103,
+ 0x81C, 0xED200103,
+ 0x81C, 0xEC220103,
+ 0x81C, 0xEB240103,
+ 0x81C, 0xEA260103,
+ 0x81C, 0xE9280103,
+ 0x81C, 0xE82A0103,
+ 0x81C, 0xE72C0103,
+ 0x81C, 0xE62E0103,
+ 0x81C, 0xE5300103,
+ 0x81C, 0xE4320103,
+ 0x81C, 0xE3340103,
+ 0x81C, 0xC6360103,
+ 0x81C, 0xC5380103,
+ 0x81C, 0xC43A0103,
+ 0x81C, 0xC33C0103,
+ 0x81C, 0xC23E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0x834A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x245A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x04620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xE22C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x64420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x434A0103,
+ 0x81C, 0x424C0103,
+ 0x81C, 0x414E0103,
+ 0x81C, 0x40500103,
+ 0x81C, 0x22520103,
+ 0x81C, 0x21540103,
+ 0x81C, 0x20560103,
+ 0x81C, 0x04580103,
+ 0x81C, 0x035A0103,
+ 0x81C, 0x025C0103,
+ 0x81C, 0x015E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000103,
+ 0x81C, 0xF8020103,
+ 0x81C, 0xF7040103,
+ 0x81C, 0xF6060103,
+ 0x81C, 0xF5080103,
+ 0x81C, 0xF40A0103,
+ 0x81C, 0xF30C0103,
+ 0x81C, 0xF20E0103,
+ 0x81C, 0xF1100103,
+ 0x81C, 0xF0120103,
+ 0x81C, 0xEF140103,
+ 0x81C, 0xEE160103,
+ 0x81C, 0xED180103,
+ 0x81C, 0xEC1A0103,
+ 0x81C, 0xEB1C0103,
+ 0x81C, 0xEA1E0103,
+ 0x81C, 0xE9200103,
+ 0x81C, 0xE8220103,
+ 0x81C, 0xE7240103,
+ 0x81C, 0xE6260103,
+ 0x81C, 0xE5280103,
+ 0x81C, 0xE42A0103,
+ 0x81C, 0xE32C0103,
+ 0x81C, 0xC32E0103,
+ 0x81C, 0xC2300103,
+ 0x81C, 0xC1320103,
+ 0x81C, 0xA4340103,
+ 0x81C, 0xA3360103,
+ 0x81C, 0xA2380103,
+ 0x81C, 0xA13A0103,
+ 0x81C, 0xA03C0103,
+ 0x81C, 0x823E0103,
+ 0x81C, 0x81400103,
+ 0x81C, 0x80420103,
+ 0x81C, 0x63440103,
+ 0x81C, 0x62460103,
+ 0x81C, 0x61480103,
+ 0x81C, 0x604A0103,
+ 0x81C, 0x244C0103,
+ 0x81C, 0x234E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x05560103,
+ 0x81C, 0x04580103,
+ 0x81C, 0x035A0103,
+ 0x81C, 0x025C0103,
+ 0x81C, 0x015E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000103,
+ 0x81C, 0xFB020103,
+ 0x81C, 0xFA040103,
+ 0x81C, 0xF9060103,
+ 0x81C, 0xF8080103,
+ 0x81C, 0xF70A0103,
+ 0x81C, 0xF60C0103,
+ 0x81C, 0xF50E0103,
+ 0x81C, 0xF4100103,
+ 0x81C, 0xF3120103,
+ 0x81C, 0xF2140103,
+ 0x81C, 0xF1160103,
+ 0x81C, 0xF0180103,
+ 0x81C, 0xEE1A0103,
+ 0x81C, 0xED1C0103,
+ 0x81C, 0xEC1E0103,
+ 0x81C, 0xEB200103,
+ 0x81C, 0xEA220103,
+ 0x81C, 0xE9240103,
+ 0x81C, 0xE8260103,
+ 0x81C, 0xE7280103,
+ 0x81C, 0xE62A0103,
+ 0x81C, 0xE52C0103,
+ 0x81C, 0xE42E0103,
+ 0x81C, 0xE3300103,
+ 0x81C, 0xE2320103,
+ 0x81C, 0xE1340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000103,
+ 0x81C, 0xFC020103,
+ 0x81C, 0xFB040103,
+ 0x81C, 0xFA060103,
+ 0x81C, 0xF9080103,
+ 0x81C, 0xF80A0103,
+ 0x81C, 0xF70C0103,
+ 0x81C, 0xF60E0103,
+ 0x81C, 0xF5100103,
+ 0x81C, 0xF4120103,
+ 0x81C, 0xF3140103,
+ 0x81C, 0xF2160103,
+ 0x81C, 0xF1180103,
+ 0x81C, 0xF01A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xC5360103,
+ 0x81C, 0xC4380103,
+ 0x81C, 0xC33A0103,
+ 0x81C, 0xC23C0103,
+ 0x81C, 0xA53E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x83480103,
+ 0x81C, 0x824A0103,
+ 0x81C, 0x814C0103,
+ 0x81C, 0x804E0103,
+ 0x81C, 0x63500103,
+ 0x81C, 0x62520103,
+ 0x81C, 0x61540103,
+ 0x81C, 0x43560103,
+ 0x81C, 0x42580103,
+ 0x81C, 0x415A0103,
+ 0x81C, 0x405C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000103,
+ 0x81C, 0xFB020103,
+ 0x81C, 0xFA040103,
+ 0x81C, 0xF9060103,
+ 0x81C, 0xF8080103,
+ 0x81C, 0xF70A0103,
+ 0x81C, 0xF60C0103,
+ 0x81C, 0xF50E0103,
+ 0x81C, 0xF4100103,
+ 0x81C, 0xF3120103,
+ 0x81C, 0xF2140103,
+ 0x81C, 0xF1160103,
+ 0x81C, 0xF0180103,
+ 0x81C, 0xEF1A0103,
+ 0x81C, 0xEE1C0103,
+ 0x81C, 0xED1E0103,
+ 0x81C, 0xEC200103,
+ 0x81C, 0xEB220103,
+ 0x81C, 0xEA240103,
+ 0x81C, 0xE9260103,
+ 0x81C, 0xE8280103,
+ 0x81C, 0xE72A0103,
+ 0x81C, 0xE62C0103,
+ 0x81C, 0xE52E0103,
+ 0x81C, 0xE4300103,
+ 0x81C, 0xE3320103,
+ 0x81C, 0xE2340103,
+ 0x81C, 0xE1360103,
+ 0x81C, 0xC3380103,
+ 0x81C, 0xC23A0103,
+ 0x81C, 0xC13C0103,
+ 0x81C, 0xC03E0103,
+ 0x81C, 0xA4400103,
+ 0x81C, 0xA3420103,
+ 0x81C, 0xA2440103,
+ 0x81C, 0xA1460103,
+ 0x81C, 0x82480103,
+ 0x81C, 0x814A0103,
+ 0x81C, 0x804C0103,
+ 0x81C, 0x634E0103,
+ 0x81C, 0x62500103,
+ 0x81C, 0x61520103,
+ 0x81C, 0x42540103,
+ 0x81C, 0x41560103,
+ 0x81C, 0x24580103,
+ 0x81C, 0x235A0103,
+ 0x81C, 0x225C0103,
+ 0x81C, 0x215E0103,
+ 0x81C, 0x20600103,
+ 0x81C, 0x03620103,
+ 0x81C, 0x02640103,
+ 0x81C, 0x01660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000103,
+ 0x81C, 0xF7020103,
+ 0x81C, 0xF6040103,
+ 0x81C, 0xF5060103,
+ 0x81C, 0xF4080103,
+ 0x81C, 0xF30A0103,
+ 0x81C, 0xF20C0103,
+ 0x81C, 0xF10E0103,
+ 0x81C, 0xF0100103,
+ 0x81C, 0xEF120103,
+ 0x81C, 0xEE140103,
+ 0x81C, 0xED160103,
+ 0x81C, 0xEC180103,
+ 0x81C, 0xEB1A0103,
+ 0x81C, 0xEA1C0103,
+ 0x81C, 0xE91E0103,
+ 0x81C, 0xE8200103,
+ 0x81C, 0xE7220103,
+ 0x81C, 0xE6240103,
+ 0x81C, 0xE5260103,
+ 0x81C, 0xE4280103,
+ 0x81C, 0xE32A0103,
+ 0x81C, 0xC32C0103,
+ 0x81C, 0xC22E0103,
+ 0x81C, 0xC1300103,
+ 0x81C, 0xC0320103,
+ 0x81C, 0xA3340103,
+ 0x81C, 0xA2360103,
+ 0x81C, 0xA1380103,
+ 0x81C, 0xA03A0103,
+ 0x81C, 0x823C0103,
+ 0x81C, 0x813E0103,
+ 0x81C, 0x80400103,
+ 0x81C, 0x63420103,
+ 0x81C, 0x62440103,
+ 0x81C, 0x61460103,
+ 0x81C, 0x60480103,
+ 0x81C, 0x424A0103,
+ 0x81C, 0x414C0103,
+ 0x81C, 0x404E0103,
+ 0x81C, 0x22500103,
+ 0x81C, 0x21520103,
+ 0x81C, 0x20540103,
+ 0x81C, 0x03560103,
+ 0x81C, 0x02580103,
+ 0x81C, 0x015A0103,
+ 0x81C, 0x005C0103,
+ 0x81C, 0x005E0103,
+ 0x81C, 0x00600103,
+ 0x81C, 0x00620103,
+ 0x81C, 0x00640103,
+ 0x81C, 0x00660103,
+ 0x81C, 0x00680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFE000103,
+ 0x81C, 0xFD020103,
+ 0x81C, 0xFC040103,
+ 0x81C, 0xFB060103,
+ 0x81C, 0xFA080103,
+ 0x81C, 0xF90A0103,
+ 0x81C, 0xF80C0103,
+ 0x81C, 0xF70E0103,
+ 0x81C, 0xF6100103,
+ 0x81C, 0xF5120103,
+ 0x81C, 0xF4140103,
+ 0x81C, 0xF3160103,
+ 0x81C, 0xF2180103,
+ 0x81C, 0xF11A0103,
+ 0x81C, 0xF01C0103,
+ 0x81C, 0xEF1E0103,
+ 0x81C, 0xEE200103,
+ 0x81C, 0xED220103,
+ 0x81C, 0xEC240103,
+ 0x81C, 0xEB260103,
+ 0x81C, 0xEA280103,
+ 0x81C, 0xE92A0103,
+ 0x81C, 0xE82C0103,
+ 0x81C, 0xE72E0103,
+ 0x81C, 0xE6300103,
+ 0x81C, 0xE5320103,
+ 0x81C, 0xE4340103,
+ 0x81C, 0xE3360103,
+ 0x81C, 0xC6380103,
+ 0x81C, 0xC53A0103,
+ 0x81C, 0xC43C0103,
+ 0x81C, 0xC33E0103,
+ 0x81C, 0xA5400103,
+ 0x81C, 0xA4420103,
+ 0x81C, 0xA3440103,
+ 0x81C, 0xA2460103,
+ 0x81C, 0xA1480103,
+ 0x81C, 0xA04A0103,
+ 0x81C, 0x824C0103,
+ 0x81C, 0x814E0103,
+ 0x81C, 0x80500103,
+ 0x81C, 0x64520103,
+ 0x81C, 0x63540103,
+ 0x81C, 0x62560103,
+ 0x81C, 0x61580103,
+ 0x81C, 0x605A0103,
+ 0x81C, 0x235C0103,
+ 0x81C, 0x225E0103,
+ 0x81C, 0x21600103,
+ 0x81C, 0x20620103,
+ 0x81C, 0x03640103,
+ 0x81C, 0x02660103,
+ 0x81C, 0x01680103,
+ 0x81C, 0x006A0103,
+ 0x81C, 0x006C0103,
+ 0x81C, 0x006E0103,
+ 0x81C, 0x00700103,
+ 0x81C, 0x00720103,
+ 0x81C, 0x00740103,
+ 0x81C, 0x00760103,
+ 0x81C, 0x00780103,
+ 0x81C, 0x007A0103,
+ 0x81C, 0x007C0103,
+ 0x81C, 0x007E0103,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x63420203,
+ 0x81C, 0x62440203,
+ 0x81C, 0x61460203,
+ 0x81C, 0x60480203,
+ 0x81C, 0x424A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x06500203,
+ 0x81C, 0x05520203,
+ 0x81C, 0x04540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x215C0203,
+ 0x81C, 0x205E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xC23E0203,
+ 0x81C, 0xC1400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0xA0480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x25580203,
+ 0x81C, 0x245A0203,
+ 0x81C, 0x235C0203,
+ 0x81C, 0x225E0203,
+ 0x81C, 0x21600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000203,
+ 0x81C, 0xF8020203,
+ 0x81C, 0xF7040203,
+ 0x81C, 0xF6060203,
+ 0x81C, 0xF5080203,
+ 0x81C, 0xF40A0203,
+ 0x81C, 0xF30C0203,
+ 0x81C, 0xF20E0203,
+ 0x81C, 0xF1100203,
+ 0x81C, 0xF0120203,
+ 0x81C, 0xEF140203,
+ 0x81C, 0xEE160203,
+ 0x81C, 0xED180203,
+ 0x81C, 0xEC1A0203,
+ 0x81C, 0xEB1C0203,
+ 0x81C, 0xEA1E0203,
+ 0x81C, 0xE9200203,
+ 0x81C, 0xE8220203,
+ 0x81C, 0xE7240203,
+ 0x81C, 0xE6260203,
+ 0x81C, 0xE5280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xC42C0203,
+ 0x81C, 0xC32E0203,
+ 0x81C, 0xC2300203,
+ 0x81C, 0xC1320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x65420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x424C0203,
+ 0x81C, 0x414E0203,
+ 0x81C, 0x40500203,
+ 0x81C, 0x22520203,
+ 0x81C, 0x21540203,
+ 0x81C, 0x20560203,
+ 0x81C, 0x04580203,
+ 0x81C, 0x035A0203,
+ 0x81C, 0x025C0203,
+ 0x81C, 0x015E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000203,
+ 0x81C, 0xFA020203,
+ 0x81C, 0xF9040203,
+ 0x81C, 0xF8060203,
+ 0x81C, 0xF7080203,
+ 0x81C, 0xF60A0203,
+ 0x81C, 0xF50C0203,
+ 0x81C, 0xF40E0203,
+ 0x81C, 0xF3100203,
+ 0x81C, 0xF2120203,
+ 0x81C, 0xF1140203,
+ 0x81C, 0xF0160203,
+ 0x81C, 0xEF180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xC23C0203,
+ 0x81C, 0xC13E0203,
+ 0x81C, 0xC0400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0xA0480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x60560203,
+ 0x81C, 0x24580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000203,
+ 0x81C, 0xF7020203,
+ 0x81C, 0xF6040203,
+ 0x81C, 0xF5060203,
+ 0x81C, 0xF4080203,
+ 0x81C, 0xF30A0203,
+ 0x81C, 0xF20C0203,
+ 0x81C, 0xF10E0203,
+ 0x81C, 0xF0100203,
+ 0x81C, 0xEF120203,
+ 0x81C, 0xEE140203,
+ 0x81C, 0xED160203,
+ 0x81C, 0xEC180203,
+ 0x81C, 0xEB1A0203,
+ 0x81C, 0xEA1C0203,
+ 0x81C, 0xE91E0203,
+ 0x81C, 0xE8200203,
+ 0x81C, 0xE7220203,
+ 0x81C, 0xE6240203,
+ 0x81C, 0xE5260203,
+ 0x81C, 0xE4280203,
+ 0x81C, 0xE32A0203,
+ 0x81C, 0xC42C0203,
+ 0x81C, 0xC32E0203,
+ 0x81C, 0xC2300203,
+ 0x81C, 0xC1320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x65420203,
+ 0x81C, 0x64440203,
+ 0x81C, 0x63460203,
+ 0x81C, 0x62480203,
+ 0x81C, 0x614A0203,
+ 0x81C, 0x424C0203,
+ 0x81C, 0x414E0203,
+ 0x81C, 0x40500203,
+ 0x81C, 0x22520203,
+ 0x81C, 0x21540203,
+ 0x81C, 0x20560203,
+ 0x81C, 0x04580203,
+ 0x81C, 0x035A0203,
+ 0x81C, 0x025C0203,
+ 0x81C, 0x015E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000203,
+ 0x81C, 0xF8020203,
+ 0x81C, 0xF7040203,
+ 0x81C, 0xF6060203,
+ 0x81C, 0xF5080203,
+ 0x81C, 0xF40A0203,
+ 0x81C, 0xF30C0203,
+ 0x81C, 0xF20E0203,
+ 0x81C, 0xF1100203,
+ 0x81C, 0xF0120203,
+ 0x81C, 0xEF140203,
+ 0x81C, 0xEE160203,
+ 0x81C, 0xED180203,
+ 0x81C, 0xEC1A0203,
+ 0x81C, 0xEB1C0203,
+ 0x81C, 0xEA1E0203,
+ 0x81C, 0xE9200203,
+ 0x81C, 0xE8220203,
+ 0x81C, 0xE7240203,
+ 0x81C, 0xE6260203,
+ 0x81C, 0xE5280203,
+ 0x81C, 0xE42A0203,
+ 0x81C, 0xC42C0203,
+ 0x81C, 0xC32E0203,
+ 0x81C, 0xC2300203,
+ 0x81C, 0xC1320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x244C0203,
+ 0x81C, 0x234E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x05560203,
+ 0x81C, 0x04580203,
+ 0x81C, 0x035A0203,
+ 0x81C, 0x025C0203,
+ 0x81C, 0x015E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x215C0203,
+ 0x81C, 0x205E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEE1A0203,
+ 0x81C, 0xED1C0203,
+ 0x81C, 0xEC1E0203,
+ 0x81C, 0xEB200203,
+ 0x81C, 0xEA220203,
+ 0x81C, 0xE9240203,
+ 0x81C, 0xE8260203,
+ 0x81C, 0xE7280203,
+ 0x81C, 0xE62A0203,
+ 0x81C, 0xE52C0203,
+ 0x81C, 0xE42E0203,
+ 0x81C, 0xE3300203,
+ 0x81C, 0xE2320203,
+ 0x81C, 0xC6340203,
+ 0x81C, 0xC5360203,
+ 0x81C, 0xC4380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xA63C0203,
+ 0x81C, 0xA53E0203,
+ 0x81C, 0xA4400203,
+ 0x81C, 0xA3420203,
+ 0x81C, 0xA2440203,
+ 0x81C, 0xA1460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x804E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x04620203,
+ 0x81C, 0x03640203,
+ 0x81C, 0x02660203,
+ 0x81C, 0x01680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xE1360203,
+ 0x81C, 0xE0380203,
+ 0x81C, 0xC33A0203,
+ 0x81C, 0xC23C0203,
+ 0x81C, 0xC13E0203,
+ 0x81C, 0xA3400203,
+ 0x81C, 0xA2420203,
+ 0x81C, 0xA1440203,
+ 0x81C, 0xA0460203,
+ 0x81C, 0x83480203,
+ 0x81C, 0x824A0203,
+ 0x81C, 0x814C0203,
+ 0x81C, 0x644E0203,
+ 0x81C, 0x63500203,
+ 0x81C, 0x62520203,
+ 0x81C, 0x61540203,
+ 0x81C, 0x42560203,
+ 0x81C, 0x41580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x04600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000203,
+ 0x81C, 0xFB020203,
+ 0x81C, 0xFA040203,
+ 0x81C, 0xF9060203,
+ 0x81C, 0xF8080203,
+ 0x81C, 0xF70A0203,
+ 0x81C, 0xF60C0203,
+ 0x81C, 0xF50E0203,
+ 0x81C, 0xF4100203,
+ 0x81C, 0xF3120203,
+ 0x81C, 0xF2140203,
+ 0x81C, 0xF1160203,
+ 0x81C, 0xF0180203,
+ 0x81C, 0xEF1A0203,
+ 0x81C, 0xEE1C0203,
+ 0x81C, 0xED1E0203,
+ 0x81C, 0xEC200203,
+ 0x81C, 0xEB220203,
+ 0x81C, 0xEA240203,
+ 0x81C, 0xE9260203,
+ 0x81C, 0xE8280203,
+ 0x81C, 0xE72A0203,
+ 0x81C, 0xE62C0203,
+ 0x81C, 0xE52E0203,
+ 0x81C, 0xE4300203,
+ 0x81C, 0xE3320203,
+ 0x81C, 0xE2340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x405A0203,
+ 0x81C, 0x215C0203,
+ 0x81C, 0x205E0203,
+ 0x81C, 0x03600203,
+ 0x81C, 0x02620203,
+ 0x81C, 0x01640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000203,
+ 0x81C, 0xF6020203,
+ 0x81C, 0xF5040203,
+ 0x81C, 0xF4060203,
+ 0x81C, 0xF3080203,
+ 0x81C, 0xF20A0203,
+ 0x81C, 0xF10C0203,
+ 0x81C, 0xF00E0203,
+ 0x81C, 0xEF100203,
+ 0x81C, 0xEE120203,
+ 0x81C, 0xED140203,
+ 0x81C, 0xEC160203,
+ 0x81C, 0xEB180203,
+ 0x81C, 0xEA1A0203,
+ 0x81C, 0xE91C0203,
+ 0x81C, 0xE81E0203,
+ 0x81C, 0xE7200203,
+ 0x81C, 0xE6220203,
+ 0x81C, 0xE5240203,
+ 0x81C, 0xE4260203,
+ 0x81C, 0xE3280203,
+ 0x81C, 0xC42A0203,
+ 0x81C, 0xC32C0203,
+ 0x81C, 0xC22E0203,
+ 0x81C, 0xC1300203,
+ 0x81C, 0xC0320203,
+ 0x81C, 0xA3340203,
+ 0x81C, 0xA2360203,
+ 0x81C, 0xA1380203,
+ 0x81C, 0xA03A0203,
+ 0x81C, 0x823C0203,
+ 0x81C, 0x813E0203,
+ 0x81C, 0x80400203,
+ 0x81C, 0x64420203,
+ 0x81C, 0x63440203,
+ 0x81C, 0x62460203,
+ 0x81C, 0x61480203,
+ 0x81C, 0x604A0203,
+ 0x81C, 0x414C0203,
+ 0x81C, 0x404E0203,
+ 0x81C, 0x22500203,
+ 0x81C, 0x21520203,
+ 0x81C, 0x20540203,
+ 0x81C, 0x03560203,
+ 0x81C, 0x02580203,
+ 0x81C, 0x015A0203,
+ 0x81C, 0x005C0203,
+ 0x81C, 0x005E0203,
+ 0x81C, 0x00600203,
+ 0x81C, 0x00620203,
+ 0x81C, 0x00640203,
+ 0x81C, 0x00660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFD000203,
+ 0x81C, 0xFC020203,
+ 0x81C, 0xFB040203,
+ 0x81C, 0xFA060203,
+ 0x81C, 0xF9080203,
+ 0x81C, 0xF80A0203,
+ 0x81C, 0xF70C0203,
+ 0x81C, 0xF60E0203,
+ 0x81C, 0xF5100203,
+ 0x81C, 0xF4120203,
+ 0x81C, 0xF3140203,
+ 0x81C, 0xF2160203,
+ 0x81C, 0xF1180203,
+ 0x81C, 0xF01A0203,
+ 0x81C, 0xEF1C0203,
+ 0x81C, 0xEE1E0203,
+ 0x81C, 0xED200203,
+ 0x81C, 0xEC220203,
+ 0x81C, 0xEB240203,
+ 0x81C, 0xEA260203,
+ 0x81C, 0xE9280203,
+ 0x81C, 0xE82A0203,
+ 0x81C, 0xE72C0203,
+ 0x81C, 0xE62E0203,
+ 0x81C, 0xE5300203,
+ 0x81C, 0xE4320203,
+ 0x81C, 0xE3340203,
+ 0x81C, 0xC6360203,
+ 0x81C, 0xC5380203,
+ 0x81C, 0xC43A0203,
+ 0x81C, 0xC33C0203,
+ 0x81C, 0xA63E0203,
+ 0x81C, 0xA5400203,
+ 0x81C, 0xA4420203,
+ 0x81C, 0xA3440203,
+ 0x81C, 0xA2460203,
+ 0x81C, 0xA1480203,
+ 0x81C, 0x834A0203,
+ 0x81C, 0x824C0203,
+ 0x81C, 0x814E0203,
+ 0x81C, 0x64500203,
+ 0x81C, 0x63520203,
+ 0x81C, 0x62540203,
+ 0x81C, 0x61560203,
+ 0x81C, 0x60580203,
+ 0x81C, 0x235A0203,
+ 0x81C, 0x225C0203,
+ 0x81C, 0x215E0203,
+ 0x81C, 0x20600203,
+ 0x81C, 0x03620203,
+ 0x81C, 0x02640203,
+ 0x81C, 0x01660203,
+ 0x81C, 0x00680203,
+ 0x81C, 0x006A0203,
+ 0x81C, 0x006C0203,
+ 0x81C, 0x006E0203,
+ 0x81C, 0x00700203,
+ 0x81C, 0x00720203,
+ 0x81C, 0x00740203,
+ 0x81C, 0x00760203,
+ 0x81C, 0x00780203,
+ 0x81C, 0x007A0203,
+ 0x81C, 0x007C0203,
+ 0x81C, 0x007E0203,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x06500303,
+ 0x81C, 0x05520303,
+ 0x81C, 0x04540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xE1340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xC23C0303,
+ 0x81C, 0xC13E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x64500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x235A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000303,
+ 0x81C, 0xF8020303,
+ 0x81C, 0xF7040303,
+ 0x81C, 0xF6060303,
+ 0x81C, 0xF5080303,
+ 0x81C, 0xF40A0303,
+ 0x81C, 0xF30C0303,
+ 0x81C, 0xF20E0303,
+ 0x81C, 0xF1100303,
+ 0x81C, 0xF0120303,
+ 0x81C, 0xEF140303,
+ 0x81C, 0xEE160303,
+ 0x81C, 0xED180303,
+ 0x81C, 0xEC1A0303,
+ 0x81C, 0xEB1C0303,
+ 0x81C, 0xEA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xC0320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xCA200303,
+ 0x81C, 0xC9220303,
+ 0x81C, 0xC8240303,
+ 0x81C, 0xC7260303,
+ 0x81C, 0xC6280303,
+ 0x81C, 0xC52A0303,
+ 0x81C, 0xC42C0303,
+ 0x81C, 0xC32E0303,
+ 0x81C, 0xC2300303,
+ 0x81C, 0xC1320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x65420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x424C0303,
+ 0x81C, 0x414E0303,
+ 0x81C, 0x40500303,
+ 0x81C, 0x22520303,
+ 0x81C, 0x21540303,
+ 0x81C, 0x20560303,
+ 0x81C, 0x04580303,
+ 0x81C, 0x035A0303,
+ 0x81C, 0x025C0303,
+ 0x81C, 0x015E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xC23C0303,
+ 0x81C, 0xC13E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x43540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x235A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xCA200303,
+ 0x81C, 0xC9220303,
+ 0x81C, 0xC8240303,
+ 0x81C, 0xC7260303,
+ 0x81C, 0xC6280303,
+ 0x81C, 0xC52A0303,
+ 0x81C, 0xC42C0303,
+ 0x81C, 0xC32E0303,
+ 0x81C, 0xC2300303,
+ 0x81C, 0xC1320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x65420303,
+ 0x81C, 0x64440303,
+ 0x81C, 0x63460303,
+ 0x81C, 0x62480303,
+ 0x81C, 0x614A0303,
+ 0x81C, 0x424C0303,
+ 0x81C, 0x414E0303,
+ 0x81C, 0x40500303,
+ 0x81C, 0x22520303,
+ 0x81C, 0x21540303,
+ 0x81C, 0x20560303,
+ 0x81C, 0x04580303,
+ 0x81C, 0x035A0303,
+ 0x81C, 0x025C0303,
+ 0x81C, 0x015E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000303,
+ 0x81C, 0xF7020303,
+ 0x81C, 0xF6040303,
+ 0x81C, 0xF5060303,
+ 0x81C, 0xF4080303,
+ 0x81C, 0xF30A0303,
+ 0x81C, 0xF20C0303,
+ 0x81C, 0xF10E0303,
+ 0x81C, 0xF0100303,
+ 0x81C, 0xEF120303,
+ 0x81C, 0xEE140303,
+ 0x81C, 0xED160303,
+ 0x81C, 0xEC180303,
+ 0x81C, 0xEB1A0303,
+ 0x81C, 0xEA1C0303,
+ 0x81C, 0xE91E0303,
+ 0x81C, 0xCA200303,
+ 0x81C, 0xC9220303,
+ 0x81C, 0xC8240303,
+ 0x81C, 0xC7260303,
+ 0x81C, 0xC6280303,
+ 0x81C, 0xC52A0303,
+ 0x81C, 0xC42C0303,
+ 0x81C, 0xC32E0303,
+ 0x81C, 0xC2300303,
+ 0x81C, 0xC1320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x234C0303,
+ 0x81C, 0x224E0303,
+ 0x81C, 0x21500303,
+ 0x81C, 0x20520303,
+ 0x81C, 0x06540303,
+ 0x81C, 0x05560303,
+ 0x81C, 0x04580303,
+ 0x81C, 0x035A0303,
+ 0x81C, 0x025C0303,
+ 0x81C, 0x015E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xEF160303,
+ 0x81C, 0xEE180303,
+ 0x81C, 0xED1A0303,
+ 0x81C, 0xEC1C0303,
+ 0x81C, 0xEB1E0303,
+ 0x81C, 0xEA200303,
+ 0x81C, 0xE9220303,
+ 0x81C, 0xE8240303,
+ 0x81C, 0xE7260303,
+ 0x81C, 0xE6280303,
+ 0x81C, 0xE52A0303,
+ 0x81C, 0xE42C0303,
+ 0x81C, 0xE32E0303,
+ 0x81C, 0xE2300303,
+ 0x81C, 0xE1320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xEF160303,
+ 0x81C, 0xEE180303,
+ 0x81C, 0xED1A0303,
+ 0x81C, 0xEC1C0303,
+ 0x81C, 0xEB1E0303,
+ 0x81C, 0xEA200303,
+ 0x81C, 0xE9220303,
+ 0x81C, 0xE8240303,
+ 0x81C, 0xE7260303,
+ 0x81C, 0xE6280303,
+ 0x81C, 0xE52A0303,
+ 0x81C, 0xE42C0303,
+ 0x81C, 0xE32E0303,
+ 0x81C, 0xE2300303,
+ 0x81C, 0xE1320303,
+ 0x81C, 0xC6340303,
+ 0x81C, 0xC5360303,
+ 0x81C, 0xC4380303,
+ 0x81C, 0xC33A0303,
+ 0x81C, 0xA63C0303,
+ 0x81C, 0xA53E0303,
+ 0x81C, 0xA4400303,
+ 0x81C, 0xA3420303,
+ 0x81C, 0xA2440303,
+ 0x81C, 0xA1460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x804E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x42560303,
+ 0x81C, 0x41580303,
+ 0x81C, 0x405A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x04620303,
+ 0x81C, 0x03640303,
+ 0x81C, 0x02660303,
+ 0x81C, 0x01680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000303,
+ 0x81C, 0xFA020303,
+ 0x81C, 0xF9040303,
+ 0x81C, 0xF8060303,
+ 0x81C, 0xF7080303,
+ 0x81C, 0xF60A0303,
+ 0x81C, 0xF50C0303,
+ 0x81C, 0xF40E0303,
+ 0x81C, 0xF3100303,
+ 0x81C, 0xF2120303,
+ 0x81C, 0xF1140303,
+ 0x81C, 0xF0160303,
+ 0x81C, 0xEF180303,
+ 0x81C, 0xEE1A0303,
+ 0x81C, 0xED1C0303,
+ 0x81C, 0xEC1E0303,
+ 0x81C, 0xEB200303,
+ 0x81C, 0xEA220303,
+ 0x81C, 0xE9240303,
+ 0x81C, 0xE8260303,
+ 0x81C, 0xE7280303,
+ 0x81C, 0xE62A0303,
+ 0x81C, 0xE52C0303,
+ 0x81C, 0xE42E0303,
+ 0x81C, 0xE3300303,
+ 0x81C, 0xE2320303,
+ 0x81C, 0xE1340303,
+ 0x81C, 0xE0360303,
+ 0x81C, 0xC3380303,
+ 0x81C, 0xC23A0303,
+ 0x81C, 0xC13C0303,
+ 0x81C, 0xC03E0303,
+ 0x81C, 0xA3400303,
+ 0x81C, 0xA2420303,
+ 0x81C, 0xA1440303,
+ 0x81C, 0xA0460303,
+ 0x81C, 0x83480303,
+ 0x81C, 0x824A0303,
+ 0x81C, 0x814C0303,
+ 0x81C, 0x644E0303,
+ 0x81C, 0x63500303,
+ 0x81C, 0x62520303,
+ 0x81C, 0x61540303,
+ 0x81C, 0x24560303,
+ 0x81C, 0x23580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x215C0303,
+ 0x81C, 0x055E0303,
+ 0x81C, 0x04600303,
+ 0x81C, 0x03620303,
+ 0x81C, 0x02640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x225A0303,
+ 0x81C, 0x055C0303,
+ 0x81C, 0x045E0303,
+ 0x81C, 0x03600303,
+ 0x81C, 0x02620303,
+ 0x81C, 0x01640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000303,
+ 0x81C, 0xF6020303,
+ 0x81C, 0xF5040303,
+ 0x81C, 0xF4060303,
+ 0x81C, 0xF3080303,
+ 0x81C, 0xF20A0303,
+ 0x81C, 0xF10C0303,
+ 0x81C, 0xF00E0303,
+ 0x81C, 0xEF100303,
+ 0x81C, 0xEE120303,
+ 0x81C, 0xED140303,
+ 0x81C, 0xEC160303,
+ 0x81C, 0xEB180303,
+ 0x81C, 0xEA1A0303,
+ 0x81C, 0xE91C0303,
+ 0x81C, 0xCA1E0303,
+ 0x81C, 0xC9200303,
+ 0x81C, 0xC8220303,
+ 0x81C, 0xC7240303,
+ 0x81C, 0xC6260303,
+ 0x81C, 0xC5280303,
+ 0x81C, 0xC42A0303,
+ 0x81C, 0xC32C0303,
+ 0x81C, 0xC22E0303,
+ 0x81C, 0xC1300303,
+ 0x81C, 0xA4320303,
+ 0x81C, 0xA3340303,
+ 0x81C, 0xA2360303,
+ 0x81C, 0xA1380303,
+ 0x81C, 0xA03A0303,
+ 0x81C, 0x823C0303,
+ 0x81C, 0x813E0303,
+ 0x81C, 0x80400303,
+ 0x81C, 0x64420303,
+ 0x81C, 0x63440303,
+ 0x81C, 0x62460303,
+ 0x81C, 0x61480303,
+ 0x81C, 0x604A0303,
+ 0x81C, 0x414C0303,
+ 0x81C, 0x404E0303,
+ 0x81C, 0x22500303,
+ 0x81C, 0x21520303,
+ 0x81C, 0x20540303,
+ 0x81C, 0x03560303,
+ 0x81C, 0x02580303,
+ 0x81C, 0x015A0303,
+ 0x81C, 0x005C0303,
+ 0x81C, 0x005E0303,
+ 0x81C, 0x00600303,
+ 0x81C, 0x00620303,
+ 0x81C, 0x00640303,
+ 0x81C, 0x00660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFC000303,
+ 0x81C, 0xFB020303,
+ 0x81C, 0xFA040303,
+ 0x81C, 0xF9060303,
+ 0x81C, 0xF8080303,
+ 0x81C, 0xF70A0303,
+ 0x81C, 0xF60C0303,
+ 0x81C, 0xF50E0303,
+ 0x81C, 0xF4100303,
+ 0x81C, 0xF3120303,
+ 0x81C, 0xF2140303,
+ 0x81C, 0xF1160303,
+ 0x81C, 0xF0180303,
+ 0x81C, 0xEF1A0303,
+ 0x81C, 0xEE1C0303,
+ 0x81C, 0xED1E0303,
+ 0x81C, 0xEC200303,
+ 0x81C, 0xEB220303,
+ 0x81C, 0xEA240303,
+ 0x81C, 0xE9260303,
+ 0x81C, 0xE8280303,
+ 0x81C, 0xE72A0303,
+ 0x81C, 0xE62C0303,
+ 0x81C, 0xE52E0303,
+ 0x81C, 0xE4300303,
+ 0x81C, 0xE3320303,
+ 0x81C, 0xE2340303,
+ 0x81C, 0xC6360303,
+ 0x81C, 0xC5380303,
+ 0x81C, 0xC43A0303,
+ 0x81C, 0xC33C0303,
+ 0x81C, 0xA63E0303,
+ 0x81C, 0xA5400303,
+ 0x81C, 0xA4420303,
+ 0x81C, 0xA3440303,
+ 0x81C, 0xA2460303,
+ 0x81C, 0x84480303,
+ 0x81C, 0x834A0303,
+ 0x81C, 0x824C0303,
+ 0x81C, 0x814E0303,
+ 0x81C, 0x80500303,
+ 0x81C, 0x63520303,
+ 0x81C, 0x62540303,
+ 0x81C, 0x61560303,
+ 0x81C, 0x60580303,
+ 0x81C, 0x235A0303,
+ 0x81C, 0x225C0303,
+ 0x81C, 0x215E0303,
+ 0x81C, 0x20600303,
+ 0x81C, 0x03620303,
+ 0x81C, 0x02640303,
+ 0x81C, 0x01660303,
+ 0x81C, 0x00680303,
+ 0x81C, 0x006A0303,
+ 0x81C, 0x006C0303,
+ 0x81C, 0x006E0303,
+ 0x81C, 0x00700303,
+ 0x81C, 0x00720303,
+ 0x81C, 0x00740303,
+ 0x81C, 0x00760303,
+ 0x81C, 0x00780303,
+ 0x81C, 0x007A0303,
+ 0x81C, 0x007C0303,
+ 0x81C, 0x007E0303,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF6000403,
+ 0x81C, 0xF5020403,
+ 0x81C, 0xF4040403,
+ 0x81C, 0xF3060403,
+ 0x81C, 0xF2080403,
+ 0x81C, 0xF10A0403,
+ 0x81C, 0xF00C0403,
+ 0x81C, 0xEF0E0403,
+ 0x81C, 0xD6100403,
+ 0x81C, 0xD5120403,
+ 0x81C, 0xD4140403,
+ 0x81C, 0xD3160403,
+ 0x81C, 0xD2180403,
+ 0x81C, 0xD11A0403,
+ 0x81C, 0xD01C0403,
+ 0x81C, 0xCF1E0403,
+ 0x81C, 0x95200403,
+ 0x81C, 0x94220403,
+ 0x81C, 0x93240403,
+ 0x81C, 0x92260403,
+ 0x81C, 0x91280403,
+ 0x81C, 0x902A0403,
+ 0x81C, 0x8F2C0403,
+ 0x81C, 0x8E2E0403,
+ 0x81C, 0x8D300403,
+ 0x81C, 0x8C320403,
+ 0x81C, 0x8B340403,
+ 0x81C, 0x8A360403,
+ 0x81C, 0x89380403,
+ 0x81C, 0x883A0403,
+ 0x81C, 0x873C0403,
+ 0x81C, 0x863E0403,
+ 0x81C, 0x68400403,
+ 0x81C, 0x67420403,
+ 0x81C, 0x66440403,
+ 0x81C, 0x65460403,
+ 0x81C, 0x64480403,
+ 0x81C, 0x634A0403,
+ 0x81C, 0x484C0403,
+ 0x81C, 0x474E0403,
+ 0x81C, 0x46500403,
+ 0x81C, 0x45520403,
+ 0x81C, 0x44540403,
+ 0x81C, 0x27560403,
+ 0x81C, 0x26580403,
+ 0x81C, 0x255A0403,
+ 0x81C, 0x245C0403,
+ 0x81C, 0x235E0403,
+ 0x81C, 0x04600403,
+ 0x81C, 0x03620403,
+ 0x81C, 0x02640403,
+ 0x81C, 0x01660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF5000403,
+ 0x81C, 0xF4020403,
+ 0x81C, 0xF3040403,
+ 0x81C, 0xF2060403,
+ 0x81C, 0xF1080403,
+ 0x81C, 0xF00A0403,
+ 0x81C, 0xEF0C0403,
+ 0x81C, 0xEE0E0403,
+ 0x81C, 0xED100403,
+ 0x81C, 0xEC120403,
+ 0x81C, 0xEB140403,
+ 0x81C, 0xEA160403,
+ 0x81C, 0xE9180403,
+ 0x81C, 0xE81A0403,
+ 0x81C, 0xE71C0403,
+ 0x81C, 0xE61E0403,
+ 0x81C, 0xE5200403,
+ 0x81C, 0xE4220403,
+ 0x81C, 0xE3240403,
+ 0x81C, 0xE2260403,
+ 0x81C, 0xE1280403,
+ 0x81C, 0xE02A0403,
+ 0x81C, 0xC32C0403,
+ 0x81C, 0xC22E0403,
+ 0x81C, 0xC1300403,
+ 0x81C, 0xC0320403,
+ 0x81C, 0xA4340403,
+ 0x81C, 0xA3360403,
+ 0x81C, 0xA2380403,
+ 0x81C, 0xA13A0403,
+ 0x81C, 0xA03C0403,
+ 0x81C, 0x823E0403,
+ 0x81C, 0x81400403,
+ 0x81C, 0x80420403,
+ 0x81C, 0x64440403,
+ 0x81C, 0x63460403,
+ 0x81C, 0x62480403,
+ 0x81C, 0x614A0403,
+ 0x81C, 0x604C0403,
+ 0x81C, 0x454E0403,
+ 0x81C, 0x44500403,
+ 0x81C, 0x43520403,
+ 0x81C, 0x42540403,
+ 0x81C, 0x41560403,
+ 0x81C, 0x40580403,
+ 0x81C, 0x055A0403,
+ 0x81C, 0x045C0403,
+ 0x81C, 0x035E0403,
+ 0x81C, 0x02600403,
+ 0x81C, 0x01620403,
+ 0x81C, 0x00640403,
+ 0x81C, 0x00660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xF6000403,
+ 0x81C, 0xF5020403,
+ 0x81C, 0xF4040403,
+ 0x81C, 0xF3060403,
+ 0x81C, 0xF2080403,
+ 0x81C, 0xF10A0403,
+ 0x81C, 0xF00C0403,
+ 0x81C, 0xEF0E0403,
+ 0x81C, 0xD6100403,
+ 0x81C, 0xD5120403,
+ 0x81C, 0xD4140403,
+ 0x81C, 0xD3160403,
+ 0x81C, 0xD2180403,
+ 0x81C, 0xD11A0403,
+ 0x81C, 0xD01C0403,
+ 0x81C, 0xCF1E0403,
+ 0x81C, 0x95200403,
+ 0x81C, 0x94220403,
+ 0x81C, 0x93240403,
+ 0x81C, 0x92260403,
+ 0x81C, 0x91280403,
+ 0x81C, 0x902A0403,
+ 0x81C, 0x8F2C0403,
+ 0x81C, 0x8E2E0403,
+ 0x81C, 0x8D300403,
+ 0x81C, 0x8C320403,
+ 0x81C, 0x8B340403,
+ 0x81C, 0x8A360403,
+ 0x81C, 0x89380403,
+ 0x81C, 0x883A0403,
+ 0x81C, 0x873C0403,
+ 0x81C, 0x863E0403,
+ 0x81C, 0x68400403,
+ 0x81C, 0x67420403,
+ 0x81C, 0x66440403,
+ 0x81C, 0x65460403,
+ 0x81C, 0x64480403,
+ 0x81C, 0x634A0403,
+ 0x81C, 0x484C0403,
+ 0x81C, 0x474E0403,
+ 0x81C, 0x46500403,
+ 0x81C, 0x45520403,
+ 0x81C, 0x44540403,
+ 0x81C, 0x27560403,
+ 0x81C, 0x26580403,
+ 0x81C, 0x255A0403,
+ 0x81C, 0x245C0403,
+ 0x81C, 0x235E0403,
+ 0x81C, 0x04600403,
+ 0x81C, 0x03620403,
+ 0x81C, 0x02640403,
+ 0x81C, 0x01660403,
+ 0x81C, 0x00680403,
+ 0x81C, 0x006A0403,
+ 0x81C, 0x006C0403,
+ 0x81C, 0x006E0403,
+ 0x81C, 0x00700403,
+ 0x81C, 0x00720403,
+ 0x81C, 0x00740403,
+ 0x81C, 0x00760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF000403,
+ 0x81C, 0xFF020403,
+ 0x81C, 0xFE040403,
+ 0x81C, 0xFD060403,
+ 0x81C, 0xFC080403,
+ 0x81C, 0xFB0A0403,
+ 0x81C, 0xFA0C0403,
+ 0x81C, 0xF90E0403,
+ 0x81C, 0xF8100403,
+ 0x81C, 0xF7120403,
+ 0x81C, 0xF6140403,
+ 0x81C, 0xF5160403,
+ 0x81C, 0xF4180403,
+ 0x81C, 0xF31A0403,
+ 0x81C, 0xF21C0403,
+ 0x81C, 0xD51E0403,
+ 0x81C, 0xD4200403,
+ 0x81C, 0xD3220403,
+ 0x81C, 0xD2240403,
+ 0x81C, 0xB6260403,
+ 0x81C, 0xB5280403,
+ 0x81C, 0xB42A0403,
+ 0x81C, 0xB32C0403,
+ 0x81C, 0xB22E0403,
+ 0x81C, 0xB1300403,
+ 0x81C, 0xB0320403,
+ 0x81C, 0xAF340403,
+ 0x81C, 0xAE360403,
+ 0x81C, 0xAD380403,
+ 0x81C, 0xAC3A0403,
+ 0x81C, 0xAB3C0403,
+ 0x81C, 0xAA3E0403,
+ 0x81C, 0xA9400403,
+ 0x81C, 0xA8420403,
+ 0x81C, 0xA7440403,
+ 0x81C, 0xA6460403,
+ 0x81C, 0xA5480403,
+ 0x81C, 0xA44A0403,
+ 0x81C, 0xA34C0403,
+ 0x81C, 0x854E0403,
+ 0x81C, 0x84500403,
+ 0x81C, 0x83520403,
+ 0x81C, 0x82540403,
+ 0x81C, 0x81560403,
+ 0x81C, 0x80580403,
+ 0x81C, 0x485A0403,
+ 0x81C, 0x475C0403,
+ 0x81C, 0x465E0403,
+ 0x81C, 0x45600403,
+ 0x81C, 0x44620403,
+ 0x81C, 0x0A640403,
+ 0x81C, 0x09660403,
+ 0x81C, 0x08680403,
+ 0x81C, 0x076A0403,
+ 0x81C, 0x066C0403,
+ 0x81C, 0x056E0403,
+ 0x81C, 0x04700403,
+ 0x81C, 0x03720403,
+ 0x81C, 0x02740403,
+ 0x81C, 0x01760403,
+ 0x81C, 0x00780403,
+ 0x81C, 0x007A0403,
+ 0x81C, 0x007C0403,
+ 0x81C, 0x007E0403,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBE000503,
+ 0x81C, 0xBD020503,
+ 0x81C, 0xBC040503,
+ 0x81C, 0xBB060503,
+ 0x81C, 0xBA080503,
+ 0x81C, 0xB90A0503,
+ 0x81C, 0xB80C0503,
+ 0x81C, 0xB70E0503,
+ 0x81C, 0xB6100503,
+ 0x81C, 0xB5120503,
+ 0x81C, 0xB4140503,
+ 0x81C, 0xB3160503,
+ 0x81C, 0xB2180503,
+ 0x81C, 0xB11A0503,
+ 0x81C, 0xB01C0503,
+ 0x81C, 0xAF1E0503,
+ 0x81C, 0xAE200503,
+ 0x81C, 0xAD220503,
+ 0x81C, 0xAC240503,
+ 0x81C, 0xAB260503,
+ 0x81C, 0x8D280503,
+ 0x81C, 0x8C2A0503,
+ 0x81C, 0x8B2C0503,
+ 0x81C, 0x8A2E0503,
+ 0x81C, 0x89300503,
+ 0x81C, 0x88320503,
+ 0x81C, 0x6A340503,
+ 0x81C, 0x69360503,
+ 0x81C, 0x68380503,
+ 0x81C, 0x673A0503,
+ 0x81C, 0x663C0503,
+ 0x81C, 0x653E0503,
+ 0x81C, 0x64400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x06500503,
+ 0x81C, 0x05520503,
+ 0x81C, 0x04540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007C0503,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xC32C0503,
+ 0x81C, 0xC22E0503,
+ 0x81C, 0xC1300503,
+ 0x81C, 0xC0320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000503,
+ 0x81C, 0xFD020503,
+ 0x81C, 0xFC040503,
+ 0x81C, 0xFB060503,
+ 0x81C, 0xFA080503,
+ 0x81C, 0xF90A0503,
+ 0x81C, 0xF80C0503,
+ 0x81C, 0xF70E0503,
+ 0x81C, 0xF6100503,
+ 0x81C, 0xF5120503,
+ 0x81C, 0xF4140503,
+ 0x81C, 0xF3160503,
+ 0x81C, 0xF2180503,
+ 0x81C, 0xF11A0503,
+ 0x81C, 0xF01C0503,
+ 0x81C, 0xEF1E0503,
+ 0x81C, 0xEE200503,
+ 0x81C, 0xED220503,
+ 0x81C, 0xEC240503,
+ 0x81C, 0xEB260503,
+ 0x81C, 0xEA280503,
+ 0x81C, 0xE92A0503,
+ 0x81C, 0xE82C0503,
+ 0x81C, 0xE72E0503,
+ 0x81C, 0xE6300503,
+ 0x81C, 0xE5320503,
+ 0x81C, 0xE4340503,
+ 0x81C, 0xE3360503,
+ 0x81C, 0xC6380503,
+ 0x81C, 0xC53A0503,
+ 0x81C, 0xC43C0503,
+ 0x81C, 0xC33E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0xA04A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x80500503,
+ 0x81C, 0x64520503,
+ 0x81C, 0x63540503,
+ 0x81C, 0x62560503,
+ 0x81C, 0x61580503,
+ 0x81C, 0x605A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xC32C0503,
+ 0x81C, 0xC22E0503,
+ 0x81C, 0xC1300503,
+ 0x81C, 0xC0320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEF1C0503,
+ 0x81C, 0xEE1E0503,
+ 0x81C, 0xED200503,
+ 0x81C, 0xEC220503,
+ 0x81C, 0xEB240503,
+ 0x81C, 0xEA260503,
+ 0x81C, 0xE9280503,
+ 0x81C, 0xE82A0503,
+ 0x81C, 0xE72C0503,
+ 0x81C, 0xE62E0503,
+ 0x81C, 0xE5300503,
+ 0x81C, 0xE4320503,
+ 0x81C, 0xE3340503,
+ 0x81C, 0xE2360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0xC23E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0x834A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x64500503,
+ 0x81C, 0x63520503,
+ 0x81C, 0x62540503,
+ 0x81C, 0x61560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x065E0503,
+ 0x81C, 0x05600503,
+ 0x81C, 0x04620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFA000503,
+ 0x81C, 0xF9020503,
+ 0x81C, 0xF8040503,
+ 0x81C, 0xF7060503,
+ 0x81C, 0xF6080503,
+ 0x81C, 0xF50A0503,
+ 0x81C, 0xF40C0503,
+ 0x81C, 0xF30E0503,
+ 0x81C, 0xF2100503,
+ 0x81C, 0xF1120503,
+ 0x81C, 0xF0140503,
+ 0x81C, 0xEF160503,
+ 0x81C, 0xEE180503,
+ 0x81C, 0xED1A0503,
+ 0x81C, 0xEC1C0503,
+ 0x81C, 0xEB1E0503,
+ 0x81C, 0xEA200503,
+ 0x81C, 0xE9220503,
+ 0x81C, 0xE8240503,
+ 0x81C, 0xE7260503,
+ 0x81C, 0xE6280503,
+ 0x81C, 0xE52A0503,
+ 0x81C, 0xC42C0503,
+ 0x81C, 0xC32E0503,
+ 0x81C, 0xC2300503,
+ 0x81C, 0xC1320503,
+ 0x81C, 0xA4340503,
+ 0x81C, 0xA3360503,
+ 0x81C, 0xA2380503,
+ 0x81C, 0xA13A0503,
+ 0x81C, 0x833C0503,
+ 0x81C, 0x823E0503,
+ 0x81C, 0x81400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000503,
+ 0x81C, 0xBE020503,
+ 0x81C, 0xBD040503,
+ 0x81C, 0xBC060503,
+ 0x81C, 0xBB080503,
+ 0x81C, 0xBA0A0503,
+ 0x81C, 0xB90C0503,
+ 0x81C, 0xB80E0503,
+ 0x81C, 0xB7100503,
+ 0x81C, 0xB6120503,
+ 0x81C, 0xB5140503,
+ 0x81C, 0xB4160503,
+ 0x81C, 0xB3180503,
+ 0x81C, 0xB21A0503,
+ 0x81C, 0xB11C0503,
+ 0x81C, 0x931E0503,
+ 0x81C, 0x92200503,
+ 0x81C, 0x91220503,
+ 0x81C, 0x90240503,
+ 0x81C, 0x8F260503,
+ 0x81C, 0x8E280503,
+ 0x81C, 0x8D2A0503,
+ 0x81C, 0x8C2C0503,
+ 0x81C, 0x8B2E0503,
+ 0x81C, 0x8A300503,
+ 0x81C, 0x89320503,
+ 0x81C, 0x88340503,
+ 0x81C, 0x6A360503,
+ 0x81C, 0x69380503,
+ 0x81C, 0x683A0503,
+ 0x81C, 0x673C0503,
+ 0x81C, 0x663E0503,
+ 0x81C, 0x65400503,
+ 0x81C, 0x64420503,
+ 0x81C, 0x63440503,
+ 0x81C, 0x62460503,
+ 0x81C, 0x61480503,
+ 0x81C, 0x604A0503,
+ 0x81C, 0x424C0503,
+ 0x81C, 0x414E0503,
+ 0x81C, 0x40500503,
+ 0x81C, 0x06520503,
+ 0x81C, 0x05540503,
+ 0x81C, 0x04560503,
+ 0x81C, 0x03580503,
+ 0x81C, 0x025A0503,
+ 0x81C, 0x015C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEF1C0503,
+ 0x81C, 0xEE1E0503,
+ 0x81C, 0xED200503,
+ 0x81C, 0xEC220503,
+ 0x81C, 0xEB240503,
+ 0x81C, 0xEA260503,
+ 0x81C, 0xE9280503,
+ 0x81C, 0xE82A0503,
+ 0x81C, 0xE72C0503,
+ 0x81C, 0xE62E0503,
+ 0x81C, 0xE5300503,
+ 0x81C, 0xE4320503,
+ 0x81C, 0xE3340503,
+ 0x81C, 0xC6360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0xC23E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0x834A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x245A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x04620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xE22C0503,
+ 0x81C, 0xC32E0503,
+ 0x81C, 0xC2300503,
+ 0x81C, 0xC1320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x64420503,
+ 0x81C, 0x63440503,
+ 0x81C, 0x62460503,
+ 0x81C, 0x61480503,
+ 0x81C, 0x434A0503,
+ 0x81C, 0x424C0503,
+ 0x81C, 0x414E0503,
+ 0x81C, 0x40500503,
+ 0x81C, 0x22520503,
+ 0x81C, 0x21540503,
+ 0x81C, 0x20560503,
+ 0x81C, 0x04580503,
+ 0x81C, 0x035A0503,
+ 0x81C, 0x025C0503,
+ 0x81C, 0x015E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000503,
+ 0x81C, 0xF8020503,
+ 0x81C, 0xF7040503,
+ 0x81C, 0xF6060503,
+ 0x81C, 0xF5080503,
+ 0x81C, 0xF40A0503,
+ 0x81C, 0xF30C0503,
+ 0x81C, 0xF20E0503,
+ 0x81C, 0xF1100503,
+ 0x81C, 0xF0120503,
+ 0x81C, 0xEF140503,
+ 0x81C, 0xEE160503,
+ 0x81C, 0xED180503,
+ 0x81C, 0xEC1A0503,
+ 0x81C, 0xEB1C0503,
+ 0x81C, 0xEA1E0503,
+ 0x81C, 0xE9200503,
+ 0x81C, 0xE8220503,
+ 0x81C, 0xE7240503,
+ 0x81C, 0xE6260503,
+ 0x81C, 0xE5280503,
+ 0x81C, 0xE42A0503,
+ 0x81C, 0xE32C0503,
+ 0x81C, 0xC32E0503,
+ 0x81C, 0xC2300503,
+ 0x81C, 0xC1320503,
+ 0x81C, 0xA4340503,
+ 0x81C, 0xA3360503,
+ 0x81C, 0xA2380503,
+ 0x81C, 0xA13A0503,
+ 0x81C, 0xA03C0503,
+ 0x81C, 0x823E0503,
+ 0x81C, 0x81400503,
+ 0x81C, 0x80420503,
+ 0x81C, 0x63440503,
+ 0x81C, 0x62460503,
+ 0x81C, 0x61480503,
+ 0x81C, 0x604A0503,
+ 0x81C, 0x244C0503,
+ 0x81C, 0x234E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x05560503,
+ 0x81C, 0x04580503,
+ 0x81C, 0x035A0503,
+ 0x81C, 0x025C0503,
+ 0x81C, 0x015E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000503,
+ 0x81C, 0xFD020503,
+ 0x81C, 0xFC040503,
+ 0x81C, 0xFB060503,
+ 0x81C, 0xFA080503,
+ 0x81C, 0xF90A0503,
+ 0x81C, 0xF80C0503,
+ 0x81C, 0xF70E0503,
+ 0x81C, 0xF6100503,
+ 0x81C, 0xF5120503,
+ 0x81C, 0xF4140503,
+ 0x81C, 0xF3160503,
+ 0x81C, 0xF2180503,
+ 0x81C, 0xF11A0503,
+ 0x81C, 0xF01C0503,
+ 0x81C, 0xEF1E0503,
+ 0x81C, 0xEE200503,
+ 0x81C, 0xED220503,
+ 0x81C, 0xEC240503,
+ 0x81C, 0xEB260503,
+ 0x81C, 0xEA280503,
+ 0x81C, 0xE92A0503,
+ 0x81C, 0xE82C0503,
+ 0x81C, 0xE72E0503,
+ 0x81C, 0xE6300503,
+ 0x81C, 0xE5320503,
+ 0x81C, 0xE4340503,
+ 0x81C, 0xE3360503,
+ 0x81C, 0xC6380503,
+ 0x81C, 0xC53A0503,
+ 0x81C, 0xC43C0503,
+ 0x81C, 0xC33E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0xA04A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x80500503,
+ 0x81C, 0x64520503,
+ 0x81C, 0x63540503,
+ 0x81C, 0x62560503,
+ 0x81C, 0x61580503,
+ 0x81C, 0x605A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEE1C0503,
+ 0x81C, 0xED1E0503,
+ 0x81C, 0xEC200503,
+ 0x81C, 0xEB220503,
+ 0x81C, 0xEA240503,
+ 0x81C, 0xE9260503,
+ 0x81C, 0xE8280503,
+ 0x81C, 0xE72A0503,
+ 0x81C, 0xE62C0503,
+ 0x81C, 0xE52E0503,
+ 0x81C, 0xE4300503,
+ 0x81C, 0xE3320503,
+ 0x81C, 0xE2340503,
+ 0x81C, 0xC5360503,
+ 0x81C, 0xC4380503,
+ 0x81C, 0xC33A0503,
+ 0x81C, 0xC23C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x63500503,
+ 0x81C, 0x62520503,
+ 0x81C, 0x61540503,
+ 0x81C, 0x43560503,
+ 0x81C, 0x42580503,
+ 0x81C, 0x415A0503,
+ 0x81C, 0x405C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000503,
+ 0x81C, 0xBF020503,
+ 0x81C, 0xBF040503,
+ 0x81C, 0xBF060503,
+ 0x81C, 0xBF080503,
+ 0x81C, 0xBF0A0503,
+ 0x81C, 0xBE0C0503,
+ 0x81C, 0xBD0E0503,
+ 0x81C, 0xBC100503,
+ 0x81C, 0xBB120503,
+ 0x81C, 0xBA140503,
+ 0x81C, 0xB9160503,
+ 0x81C, 0xB8180503,
+ 0x81C, 0xB71A0503,
+ 0x81C, 0xB61C0503,
+ 0x81C, 0xB51E0503,
+ 0x81C, 0xB2200503,
+ 0x81C, 0xB3220503,
+ 0x81C, 0xB2240503,
+ 0x81C, 0xB1260503,
+ 0x81C, 0xB0280503,
+ 0x81C, 0xAF2A0503,
+ 0x81C, 0xAE2C0503,
+ 0x81C, 0xAD2E0503,
+ 0x81C, 0xAC300503,
+ 0x81C, 0xAB320503,
+ 0x81C, 0xAA340503,
+ 0x81C, 0xC6360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0x883E0503,
+ 0x81C, 0x87400503,
+ 0x81C, 0x86420503,
+ 0x81C, 0x85440503,
+ 0x81C, 0x84460503,
+ 0x81C, 0x83480503,
+ 0x81C, 0x674A0503,
+ 0x81C, 0x664C0503,
+ 0x81C, 0x654E0503,
+ 0x81C, 0x64500503,
+ 0x81C, 0x27520503,
+ 0x81C, 0x26540503,
+ 0x81C, 0x25560503,
+ 0x81C, 0x24580503,
+ 0x81C, 0x235A0503,
+ 0x81C, 0x225C0503,
+ 0x81C, 0x215E0503,
+ 0x81C, 0x20600503,
+ 0x81C, 0x03620503,
+ 0x81C, 0x02640503,
+ 0x81C, 0x01660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFE000403,
+ 0x81C, 0xFD000503,
+ 0x81C, 0xFC020503,
+ 0x81C, 0xFB040503,
+ 0x81C, 0xFA060503,
+ 0x81C, 0xF9080503,
+ 0x81C, 0xF80A0503,
+ 0x81C, 0xF70C0503,
+ 0x81C, 0xF60E0503,
+ 0x81C, 0xF5100503,
+ 0x81C, 0xF4120503,
+ 0x81C, 0xF3140503,
+ 0x81C, 0xF2160503,
+ 0x81C, 0xF1180503,
+ 0x81C, 0xF01A0503,
+ 0x81C, 0xEF1C0503,
+ 0x81C, 0xEE1E0503,
+ 0x81C, 0xED200503,
+ 0x81C, 0xEC220503,
+ 0x81C, 0xEB240503,
+ 0x81C, 0xEA260503,
+ 0x81C, 0xE9280503,
+ 0x81C, 0xE82A0503,
+ 0x81C, 0xE72C0503,
+ 0x81C, 0xE62E0503,
+ 0x81C, 0xE5300503,
+ 0x81C, 0xE4320503,
+ 0x81C, 0xE3340503,
+ 0x81C, 0xC6360503,
+ 0x81C, 0xC5380503,
+ 0x81C, 0xC43A0503,
+ 0x81C, 0xC33C0503,
+ 0x81C, 0xA53E0503,
+ 0x81C, 0xA4400503,
+ 0x81C, 0xA3420503,
+ 0x81C, 0xA2440503,
+ 0x81C, 0xA1460503,
+ 0x81C, 0xA0480503,
+ 0x81C, 0x824A0503,
+ 0x81C, 0x814C0503,
+ 0x81C, 0x804E0503,
+ 0x81C, 0x64500503,
+ 0x81C, 0x63520503,
+ 0x81C, 0x62540503,
+ 0x81C, 0x61560503,
+ 0x81C, 0x60580503,
+ 0x81C, 0x235A0503,
+ 0x81C, 0x225C0503,
+ 0x81C, 0x215E0503,
+ 0x81C, 0x20600503,
+ 0x81C, 0x03620503,
+ 0x81C, 0x02640503,
+ 0x81C, 0x01660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000503,
+ 0x81C, 0xF7020503,
+ 0x81C, 0xF6040503,
+ 0x81C, 0xF5060503,
+ 0x81C, 0xF4080503,
+ 0x81C, 0xF30A0503,
+ 0x81C, 0xF20C0503,
+ 0x81C, 0xF10E0503,
+ 0x81C, 0xF0100503,
+ 0x81C, 0xEF120503,
+ 0x81C, 0xEE140503,
+ 0x81C, 0xED160503,
+ 0x81C, 0xEC180503,
+ 0x81C, 0xEB1A0503,
+ 0x81C, 0xEA1C0503,
+ 0x81C, 0xE91E0503,
+ 0x81C, 0xE8200503,
+ 0x81C, 0xE7220503,
+ 0x81C, 0xE6240503,
+ 0x81C, 0xE5260503,
+ 0x81C, 0xE4280503,
+ 0x81C, 0xE32A0503,
+ 0x81C, 0xC32C0503,
+ 0x81C, 0xC22E0503,
+ 0x81C, 0xC1300503,
+ 0x81C, 0xC0320503,
+ 0x81C, 0xA3340503,
+ 0x81C, 0xA2360503,
+ 0x81C, 0xA1380503,
+ 0x81C, 0xA03A0503,
+ 0x81C, 0x823C0503,
+ 0x81C, 0x813E0503,
+ 0x81C, 0x80400503,
+ 0x81C, 0x63420503,
+ 0x81C, 0x62440503,
+ 0x81C, 0x61460503,
+ 0x81C, 0x60480503,
+ 0x81C, 0x424A0503,
+ 0x81C, 0x414C0503,
+ 0x81C, 0x404E0503,
+ 0x81C, 0x22500503,
+ 0x81C, 0x21520503,
+ 0x81C, 0x20540503,
+ 0x81C, 0x03560503,
+ 0x81C, 0x02580503,
+ 0x81C, 0x015A0503,
+ 0x81C, 0x005C0503,
+ 0x81C, 0x005E0503,
+ 0x81C, 0x00600503,
+ 0x81C, 0x00620503,
+ 0x81C, 0x00640503,
+ 0x81C, 0x00660503,
+ 0x81C, 0x00680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFE000503,
+ 0x81C, 0xFD020503,
+ 0x81C, 0xFC040503,
+ 0x81C, 0xFB060503,
+ 0x81C, 0xFA080503,
+ 0x81C, 0xF90A0503,
+ 0x81C, 0xF80C0503,
+ 0x81C, 0xF70E0503,
+ 0x81C, 0xF6100503,
+ 0x81C, 0xF5120503,
+ 0x81C, 0xF4140503,
+ 0x81C, 0xF3160503,
+ 0x81C, 0xF2180503,
+ 0x81C, 0xF11A0503,
+ 0x81C, 0xF01C0503,
+ 0x81C, 0xEF1E0503,
+ 0x81C, 0xEE200503,
+ 0x81C, 0xED220503,
+ 0x81C, 0xEC240503,
+ 0x81C, 0xEB260503,
+ 0x81C, 0xEA280503,
+ 0x81C, 0xE92A0503,
+ 0x81C, 0xE82C0503,
+ 0x81C, 0xE72E0503,
+ 0x81C, 0xE6300503,
+ 0x81C, 0xE5320503,
+ 0x81C, 0xE4340503,
+ 0x81C, 0xE3360503,
+ 0x81C, 0xC6380503,
+ 0x81C, 0xC53A0503,
+ 0x81C, 0xC43C0503,
+ 0x81C, 0xC33E0503,
+ 0x81C, 0xA5400503,
+ 0x81C, 0xA4420503,
+ 0x81C, 0xA3440503,
+ 0x81C, 0xA2460503,
+ 0x81C, 0xA1480503,
+ 0x81C, 0xA04A0503,
+ 0x81C, 0x824C0503,
+ 0x81C, 0x814E0503,
+ 0x81C, 0x80500503,
+ 0x81C, 0x64520503,
+ 0x81C, 0x63540503,
+ 0x81C, 0x62560503,
+ 0x81C, 0x61580503,
+ 0x81C, 0x605A0503,
+ 0x81C, 0x235C0503,
+ 0x81C, 0x225E0503,
+ 0x81C, 0x21600503,
+ 0x81C, 0x20620503,
+ 0x81C, 0x03640503,
+ 0x81C, 0x02660503,
+ 0x81C, 0x01680503,
+ 0x81C, 0x006A0503,
+ 0x81C, 0x006C0503,
+ 0x81C, 0x006E0503,
+ 0x81C, 0x00700503,
+ 0x81C, 0x00720503,
+ 0x81C, 0x00740503,
+ 0x81C, 0x00760503,
+ 0x81C, 0x00780503,
+ 0x81C, 0x007A0503,
+ 0x81C, 0x007C0503,
+ 0x81C, 0x007E0503,
+ 0x81C, 0x007E0503,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBD000603,
+ 0x81C, 0xBC020603,
+ 0x81C, 0xBB040603,
+ 0x81C, 0xBA060603,
+ 0x81C, 0xB9080603,
+ 0x81C, 0xB80A0603,
+ 0x81C, 0xB70C0603,
+ 0x81C, 0xB60E0603,
+ 0x81C, 0xB5100603,
+ 0x81C, 0xB4120603,
+ 0x81C, 0xB3140603,
+ 0x81C, 0xB2160603,
+ 0x81C, 0xB1180603,
+ 0x81C, 0xB01A0603,
+ 0x81C, 0xAF1C0603,
+ 0x81C, 0xAE1E0603,
+ 0x81C, 0xAD200603,
+ 0x81C, 0x8F220603,
+ 0x81C, 0x8E240603,
+ 0x81C, 0x8D260603,
+ 0x81C, 0x8C280603,
+ 0x81C, 0x8B2A0603,
+ 0x81C, 0x8A2C0603,
+ 0x81C, 0x892E0603,
+ 0x81C, 0x88300603,
+ 0x81C, 0x6B320603,
+ 0x81C, 0x6A340603,
+ 0x81C, 0x69360603,
+ 0x81C, 0x68380603,
+ 0x81C, 0x673A0603,
+ 0x81C, 0x663C0603,
+ 0x81C, 0x653E0603,
+ 0x81C, 0x64400603,
+ 0x81C, 0x63420603,
+ 0x81C, 0x62440603,
+ 0x81C, 0x61460603,
+ 0x81C, 0x60480603,
+ 0x81C, 0x424A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x06500603,
+ 0x81C, 0x05520603,
+ 0x81C, 0x04540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007C0603,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000603,
+ 0x81C, 0xF6020603,
+ 0x81C, 0xF5040603,
+ 0x81C, 0xF4060603,
+ 0x81C, 0xF3080603,
+ 0x81C, 0xF20A0603,
+ 0x81C, 0xF10C0603,
+ 0x81C, 0xF00E0603,
+ 0x81C, 0xEF100603,
+ 0x81C, 0xEE120603,
+ 0x81C, 0xED140603,
+ 0x81C, 0xEC160603,
+ 0x81C, 0xEB180603,
+ 0x81C, 0xEA1A0603,
+ 0x81C, 0xE91C0603,
+ 0x81C, 0xE81E0603,
+ 0x81C, 0xE7200603,
+ 0x81C, 0xE6220603,
+ 0x81C, 0xE5240603,
+ 0x81C, 0xE4260603,
+ 0x81C, 0xE3280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEF1A0603,
+ 0x81C, 0xEE1C0603,
+ 0x81C, 0xED1E0603,
+ 0x81C, 0xEC200603,
+ 0x81C, 0xEB220603,
+ 0x81C, 0xEA240603,
+ 0x81C, 0xE9260603,
+ 0x81C, 0xE8280603,
+ 0x81C, 0xE72A0603,
+ 0x81C, 0xE62C0603,
+ 0x81C, 0xE52E0603,
+ 0x81C, 0xE4300603,
+ 0x81C, 0xE3320603,
+ 0x81C, 0xE2340603,
+ 0x81C, 0xC6360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xA63E0603,
+ 0x81C, 0xA5400603,
+ 0x81C, 0xA4420603,
+ 0x81C, 0xA3440603,
+ 0x81C, 0xA2460603,
+ 0x81C, 0xA1480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x60580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x215C0603,
+ 0x81C, 0x205E0603,
+ 0x81C, 0x03600603,
+ 0x81C, 0x02620603,
+ 0x81C, 0x01640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000603,
+ 0x81C, 0xF6020603,
+ 0x81C, 0xF5040603,
+ 0x81C, 0xF4060603,
+ 0x81C, 0xF3080603,
+ 0x81C, 0xF20A0603,
+ 0x81C, 0xF10C0603,
+ 0x81C, 0xF00E0603,
+ 0x81C, 0xEF100603,
+ 0x81C, 0xEE120603,
+ 0x81C, 0xED140603,
+ 0x81C, 0xEC160603,
+ 0x81C, 0xEB180603,
+ 0x81C, 0xEA1A0603,
+ 0x81C, 0xE91C0603,
+ 0x81C, 0xE81E0603,
+ 0x81C, 0xE7200603,
+ 0x81C, 0xE6220603,
+ 0x81C, 0xE5240603,
+ 0x81C, 0xE4260603,
+ 0x81C, 0xE3280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEF1A0603,
+ 0x81C, 0xEE1C0603,
+ 0x81C, 0xED1E0603,
+ 0x81C, 0xEC200603,
+ 0x81C, 0xEB220603,
+ 0x81C, 0xEA240603,
+ 0x81C, 0xE9260603,
+ 0x81C, 0xE8280603,
+ 0x81C, 0xE72A0603,
+ 0x81C, 0xE62C0603,
+ 0x81C, 0xE52E0603,
+ 0x81C, 0xE4300603,
+ 0x81C, 0xE3320603,
+ 0x81C, 0xE2340603,
+ 0x81C, 0xE1360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xC23E0603,
+ 0x81C, 0xC1400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0xA0480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x25580603,
+ 0x81C, 0x245A0603,
+ 0x81C, 0x235C0603,
+ 0x81C, 0x225E0603,
+ 0x81C, 0x21600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000603,
+ 0x81C, 0xF8020603,
+ 0x81C, 0xF7040603,
+ 0x81C, 0xF6060603,
+ 0x81C, 0xF5080603,
+ 0x81C, 0xF40A0603,
+ 0x81C, 0xF30C0603,
+ 0x81C, 0xF20E0603,
+ 0x81C, 0xF1100603,
+ 0x81C, 0xF0120603,
+ 0x81C, 0xEF140603,
+ 0x81C, 0xEE160603,
+ 0x81C, 0xED180603,
+ 0x81C, 0xEC1A0603,
+ 0x81C, 0xEB1C0603,
+ 0x81C, 0xEA1E0603,
+ 0x81C, 0xE9200603,
+ 0x81C, 0xE8220603,
+ 0x81C, 0xE7240603,
+ 0x81C, 0xE6260603,
+ 0x81C, 0xE5280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBE000603,
+ 0x81C, 0xBD020603,
+ 0x81C, 0xBC040603,
+ 0x81C, 0xBB060603,
+ 0x81C, 0xBA080603,
+ 0x81C, 0xB90A0603,
+ 0x81C, 0xB80C0603,
+ 0x81C, 0xB70E0603,
+ 0x81C, 0xB6100603,
+ 0x81C, 0xB5120603,
+ 0x81C, 0xB4140603,
+ 0x81C, 0xB3160603,
+ 0x81C, 0xB2180603,
+ 0x81C, 0xB11A0603,
+ 0x81C, 0xB01C0603,
+ 0x81C, 0x921E0603,
+ 0x81C, 0x91200603,
+ 0x81C, 0x90220603,
+ 0x81C, 0x8F240603,
+ 0x81C, 0x8E260603,
+ 0x81C, 0x8D280603,
+ 0x81C, 0x8C2A0603,
+ 0x81C, 0x8B2C0603,
+ 0x81C, 0x8A2E0603,
+ 0x81C, 0x89300603,
+ 0x81C, 0x88320603,
+ 0x81C, 0x6B340603,
+ 0x81C, 0x6A360603,
+ 0x81C, 0x69380603,
+ 0x81C, 0x683A0603,
+ 0x81C, 0x673C0603,
+ 0x81C, 0x663E0603,
+ 0x81C, 0x65400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x424C0603,
+ 0x81C, 0x414E0603,
+ 0x81C, 0x40500603,
+ 0x81C, 0x06520603,
+ 0x81C, 0x05540603,
+ 0x81C, 0x04560603,
+ 0x81C, 0x03580603,
+ 0x81C, 0x025A0603,
+ 0x81C, 0x015C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000603,
+ 0x81C, 0xFA020603,
+ 0x81C, 0xF9040603,
+ 0x81C, 0xF8060603,
+ 0x81C, 0xF7080603,
+ 0x81C, 0xF60A0603,
+ 0x81C, 0xF50C0603,
+ 0x81C, 0xF40E0603,
+ 0x81C, 0xF3100603,
+ 0x81C, 0xF2120603,
+ 0x81C, 0xF1140603,
+ 0x81C, 0xF0160603,
+ 0x81C, 0xEF180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xC23C0603,
+ 0x81C, 0xC13E0603,
+ 0x81C, 0xC0400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0xA0480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x60560603,
+ 0x81C, 0x24580603,
+ 0x81C, 0x235A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x03620603,
+ 0x81C, 0x02640603,
+ 0x81C, 0x01660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000603,
+ 0x81C, 0xF7020603,
+ 0x81C, 0xF6040603,
+ 0x81C, 0xF5060603,
+ 0x81C, 0xF4080603,
+ 0x81C, 0xF30A0603,
+ 0x81C, 0xF20C0603,
+ 0x81C, 0xF10E0603,
+ 0x81C, 0xF0100603,
+ 0x81C, 0xEF120603,
+ 0x81C, 0xEE140603,
+ 0x81C, 0xED160603,
+ 0x81C, 0xEC180603,
+ 0x81C, 0xEB1A0603,
+ 0x81C, 0xEA1C0603,
+ 0x81C, 0xE91E0603,
+ 0x81C, 0xE8200603,
+ 0x81C, 0xE7220603,
+ 0x81C, 0xE6240603,
+ 0x81C, 0xE5260603,
+ 0x81C, 0xE4280603,
+ 0x81C, 0xE32A0603,
+ 0x81C, 0xC42C0603,
+ 0x81C, 0xC32E0603,
+ 0x81C, 0xC2300603,
+ 0x81C, 0xC1320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x65420603,
+ 0x81C, 0x64440603,
+ 0x81C, 0x63460603,
+ 0x81C, 0x62480603,
+ 0x81C, 0x614A0603,
+ 0x81C, 0x424C0603,
+ 0x81C, 0x414E0603,
+ 0x81C, 0x40500603,
+ 0x81C, 0x22520603,
+ 0x81C, 0x21540603,
+ 0x81C, 0x20560603,
+ 0x81C, 0x04580603,
+ 0x81C, 0x035A0603,
+ 0x81C, 0x025C0603,
+ 0x81C, 0x015E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000603,
+ 0x81C, 0xF8020603,
+ 0x81C, 0xF7040603,
+ 0x81C, 0xF6060603,
+ 0x81C, 0xF5080603,
+ 0x81C, 0xF40A0603,
+ 0x81C, 0xF30C0603,
+ 0x81C, 0xF20E0603,
+ 0x81C, 0xF1100603,
+ 0x81C, 0xF0120603,
+ 0x81C, 0xEF140603,
+ 0x81C, 0xEE160603,
+ 0x81C, 0xED180603,
+ 0x81C, 0xEC1A0603,
+ 0x81C, 0xEB1C0603,
+ 0x81C, 0xEA1E0603,
+ 0x81C, 0xE9200603,
+ 0x81C, 0xE8220603,
+ 0x81C, 0xE7240603,
+ 0x81C, 0xE6260603,
+ 0x81C, 0xE5280603,
+ 0x81C, 0xE42A0603,
+ 0x81C, 0xC42C0603,
+ 0x81C, 0xC32E0603,
+ 0x81C, 0xC2300603,
+ 0x81C, 0xC1320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x244C0603,
+ 0x81C, 0x234E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x05560603,
+ 0x81C, 0x04580603,
+ 0x81C, 0x035A0603,
+ 0x81C, 0x025C0603,
+ 0x81C, 0x015E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEF1A0603,
+ 0x81C, 0xEE1C0603,
+ 0x81C, 0xED1E0603,
+ 0x81C, 0xEC200603,
+ 0x81C, 0xEB220603,
+ 0x81C, 0xEA240603,
+ 0x81C, 0xE9260603,
+ 0x81C, 0xE8280603,
+ 0x81C, 0xE72A0603,
+ 0x81C, 0xE62C0603,
+ 0x81C, 0xE52E0603,
+ 0x81C, 0xE4300603,
+ 0x81C, 0xE3320603,
+ 0x81C, 0xE2340603,
+ 0x81C, 0xC6360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xA63E0603,
+ 0x81C, 0xA5400603,
+ 0x81C, 0xA4420603,
+ 0x81C, 0xA3440603,
+ 0x81C, 0xA2460603,
+ 0x81C, 0xA1480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x60580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x215C0603,
+ 0x81C, 0x205E0603,
+ 0x81C, 0x03600603,
+ 0x81C, 0x02620603,
+ 0x81C, 0x01640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000603,
+ 0x81C, 0xFB020603,
+ 0x81C, 0xFA040603,
+ 0x81C, 0xF9060603,
+ 0x81C, 0xF8080603,
+ 0x81C, 0xF70A0603,
+ 0x81C, 0xF60C0603,
+ 0x81C, 0xF50E0603,
+ 0x81C, 0xF4100603,
+ 0x81C, 0xF3120603,
+ 0x81C, 0xF2140603,
+ 0x81C, 0xF1160603,
+ 0x81C, 0xF0180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x804E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x42560603,
+ 0x81C, 0x41580603,
+ 0x81C, 0x405A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x04620603,
+ 0x81C, 0x03640603,
+ 0x81C, 0x02660603,
+ 0x81C, 0x01680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000603,
+ 0x81C, 0xBF020603,
+ 0x81C, 0xBF040603,
+ 0x81C, 0xBF060603,
+ 0x81C, 0xBF080603,
+ 0x81C, 0xBE0A0603,
+ 0x81C, 0xBD0C0603,
+ 0x81C, 0xBC0E0603,
+ 0x81C, 0xBB100603,
+ 0x81C, 0xBA120603,
+ 0x81C, 0xB9140603,
+ 0x81C, 0xB8160603,
+ 0x81C, 0xB7180603,
+ 0x81C, 0xB61A0603,
+ 0x81C, 0xB51C0603,
+ 0x81C, 0xB41E0603,
+ 0x81C, 0xB1200603,
+ 0x81C, 0xB2220603,
+ 0x81C, 0xB1240603,
+ 0x81C, 0xB0260603,
+ 0x81C, 0xAF280603,
+ 0x81C, 0xAE2A0603,
+ 0x81C, 0xAD2C0603,
+ 0x81C, 0xAC2E0603,
+ 0x81C, 0xAB300603,
+ 0x81C, 0xAA320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0x883C0603,
+ 0x81C, 0x873E0603,
+ 0x81C, 0x86400603,
+ 0x81C, 0x85420603,
+ 0x81C, 0x84440603,
+ 0x81C, 0x83460603,
+ 0x81C, 0x67480603,
+ 0x81C, 0x664A0603,
+ 0x81C, 0x654C0603,
+ 0x81C, 0x644E0603,
+ 0x81C, 0x27500603,
+ 0x81C, 0x26520603,
+ 0x81C, 0x25540603,
+ 0x81C, 0x24560603,
+ 0x81C, 0x23580603,
+ 0x81C, 0x225A0603,
+ 0x81C, 0x215C0603,
+ 0x81C, 0x205E0603,
+ 0x81C, 0x03600603,
+ 0x81C, 0x02620603,
+ 0x81C, 0x01640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000403,
+ 0x81C, 0xFB000603,
+ 0x81C, 0xFA020603,
+ 0x81C, 0xF9040603,
+ 0x81C, 0xF8060603,
+ 0x81C, 0xF7080603,
+ 0x81C, 0xF60A0603,
+ 0x81C, 0xF50C0603,
+ 0x81C, 0xF40E0603,
+ 0x81C, 0xF3100603,
+ 0x81C, 0xF2120603,
+ 0x81C, 0xF1140603,
+ 0x81C, 0xF0160603,
+ 0x81C, 0xEF180603,
+ 0x81C, 0xEE1A0603,
+ 0x81C, 0xED1C0603,
+ 0x81C, 0xEC1E0603,
+ 0x81C, 0xEB200603,
+ 0x81C, 0xEA220603,
+ 0x81C, 0xE9240603,
+ 0x81C, 0xE8260603,
+ 0x81C, 0xE7280603,
+ 0x81C, 0xE62A0603,
+ 0x81C, 0xE52C0603,
+ 0x81C, 0xE42E0603,
+ 0x81C, 0xE3300603,
+ 0x81C, 0xE2320603,
+ 0x81C, 0xC6340603,
+ 0x81C, 0xC5360603,
+ 0x81C, 0xC4380603,
+ 0x81C, 0xC33A0603,
+ 0x81C, 0xA63C0603,
+ 0x81C, 0xA53E0603,
+ 0x81C, 0xA4400603,
+ 0x81C, 0xA3420603,
+ 0x81C, 0xA2440603,
+ 0x81C, 0xA1460603,
+ 0x81C, 0x83480603,
+ 0x81C, 0x824A0603,
+ 0x81C, 0x814C0603,
+ 0x81C, 0x644E0603,
+ 0x81C, 0x63500603,
+ 0x81C, 0x62520603,
+ 0x81C, 0x61540603,
+ 0x81C, 0x60560603,
+ 0x81C, 0x40580603,
+ 0x81C, 0x215A0603,
+ 0x81C, 0x205C0603,
+ 0x81C, 0x035E0603,
+ 0x81C, 0x02600603,
+ 0x81C, 0x01620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000603,
+ 0x81C, 0xF6020603,
+ 0x81C, 0xF5040603,
+ 0x81C, 0xF4060603,
+ 0x81C, 0xF3080603,
+ 0x81C, 0xF20A0603,
+ 0x81C, 0xF10C0603,
+ 0x81C, 0xF00E0603,
+ 0x81C, 0xEF100603,
+ 0x81C, 0xEE120603,
+ 0x81C, 0xED140603,
+ 0x81C, 0xEC160603,
+ 0x81C, 0xEB180603,
+ 0x81C, 0xEA1A0603,
+ 0x81C, 0xE91C0603,
+ 0x81C, 0xE81E0603,
+ 0x81C, 0xE7200603,
+ 0x81C, 0xE6220603,
+ 0x81C, 0xE5240603,
+ 0x81C, 0xE4260603,
+ 0x81C, 0xE3280603,
+ 0x81C, 0xC42A0603,
+ 0x81C, 0xC32C0603,
+ 0x81C, 0xC22E0603,
+ 0x81C, 0xC1300603,
+ 0x81C, 0xC0320603,
+ 0x81C, 0xA3340603,
+ 0x81C, 0xA2360603,
+ 0x81C, 0xA1380603,
+ 0x81C, 0xA03A0603,
+ 0x81C, 0x823C0603,
+ 0x81C, 0x813E0603,
+ 0x81C, 0x80400603,
+ 0x81C, 0x64420603,
+ 0x81C, 0x63440603,
+ 0x81C, 0x62460603,
+ 0x81C, 0x61480603,
+ 0x81C, 0x604A0603,
+ 0x81C, 0x414C0603,
+ 0x81C, 0x404E0603,
+ 0x81C, 0x22500603,
+ 0x81C, 0x21520603,
+ 0x81C, 0x20540603,
+ 0x81C, 0x03560603,
+ 0x81C, 0x02580603,
+ 0x81C, 0x015A0603,
+ 0x81C, 0x005C0603,
+ 0x81C, 0x005E0603,
+ 0x81C, 0x00600603,
+ 0x81C, 0x00620603,
+ 0x81C, 0x00640603,
+ 0x81C, 0x00660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFD000603,
+ 0x81C, 0xFC020603,
+ 0x81C, 0xFB040603,
+ 0x81C, 0xFA060603,
+ 0x81C, 0xF9080603,
+ 0x81C, 0xF80A0603,
+ 0x81C, 0xF70C0603,
+ 0x81C, 0xF60E0603,
+ 0x81C, 0xF5100603,
+ 0x81C, 0xF4120603,
+ 0x81C, 0xF3140603,
+ 0x81C, 0xF2160603,
+ 0x81C, 0xF1180603,
+ 0x81C, 0xF01A0603,
+ 0x81C, 0xEF1C0603,
+ 0x81C, 0xEE1E0603,
+ 0x81C, 0xED200603,
+ 0x81C, 0xEC220603,
+ 0x81C, 0xEB240603,
+ 0x81C, 0xEA260603,
+ 0x81C, 0xE9280603,
+ 0x81C, 0xE82A0603,
+ 0x81C, 0xE72C0603,
+ 0x81C, 0xE62E0603,
+ 0x81C, 0xE5300603,
+ 0x81C, 0xE4320603,
+ 0x81C, 0xE3340603,
+ 0x81C, 0xC6360603,
+ 0x81C, 0xC5380603,
+ 0x81C, 0xC43A0603,
+ 0x81C, 0xC33C0603,
+ 0x81C, 0xA63E0603,
+ 0x81C, 0xA5400603,
+ 0x81C, 0xA4420603,
+ 0x81C, 0xA3440603,
+ 0x81C, 0xA2460603,
+ 0x81C, 0xA1480603,
+ 0x81C, 0x834A0603,
+ 0x81C, 0x824C0603,
+ 0x81C, 0x814E0603,
+ 0x81C, 0x64500603,
+ 0x81C, 0x63520603,
+ 0x81C, 0x62540603,
+ 0x81C, 0x61560603,
+ 0x81C, 0x60580603,
+ 0x81C, 0x235A0603,
+ 0x81C, 0x225C0603,
+ 0x81C, 0x215E0603,
+ 0x81C, 0x20600603,
+ 0x81C, 0x03620603,
+ 0x81C, 0x02640603,
+ 0x81C, 0x01660603,
+ 0x81C, 0x00680603,
+ 0x81C, 0x006A0603,
+ 0x81C, 0x006C0603,
+ 0x81C, 0x006E0603,
+ 0x81C, 0x00700603,
+ 0x81C, 0x00720603,
+ 0x81C, 0x00740603,
+ 0x81C, 0x00760603,
+ 0x81C, 0x00780603,
+ 0x81C, 0x007A0603,
+ 0x81C, 0x007C0603,
+ 0x81C, 0x007E0603,
+ 0x81C, 0x007E0603,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBD000703,
+ 0x81C, 0xBC020703,
+ 0x81C, 0xBB040703,
+ 0x81C, 0xBA060703,
+ 0x81C, 0xB9080703,
+ 0x81C, 0xB80A0703,
+ 0x81C, 0xB70C0703,
+ 0x81C, 0xB60E0703,
+ 0x81C, 0xB5100703,
+ 0x81C, 0xB4120703,
+ 0x81C, 0xB3140703,
+ 0x81C, 0xB2160703,
+ 0x81C, 0xB1180703,
+ 0x81C, 0xB01A0703,
+ 0x81C, 0xAF1C0703,
+ 0x81C, 0xAE1E0703,
+ 0x81C, 0xAD200703,
+ 0x81C, 0xAC220703,
+ 0x81C, 0x8E240703,
+ 0x81C, 0x8D260703,
+ 0x81C, 0x8C280703,
+ 0x81C, 0x6F2A0703,
+ 0x81C, 0x6E2C0703,
+ 0x81C, 0x6D2E0703,
+ 0x81C, 0x6C300703,
+ 0x81C, 0x6B320703,
+ 0x81C, 0x6A340703,
+ 0x81C, 0x69360703,
+ 0x81C, 0x68380703,
+ 0x81C, 0x673A0703,
+ 0x81C, 0x663C0703,
+ 0x81C, 0x653E0703,
+ 0x81C, 0x64400703,
+ 0x81C, 0x63420703,
+ 0x81C, 0x62440703,
+ 0x81C, 0x61460703,
+ 0x81C, 0x60480703,
+ 0x81C, 0x424A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x06500703,
+ 0x81C, 0x05520703,
+ 0x81C, 0x04540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007C0703,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000703,
+ 0x81C, 0xF6020703,
+ 0x81C, 0xF5040703,
+ 0x81C, 0xF4060703,
+ 0x81C, 0xF3080703,
+ 0x81C, 0xF20A0703,
+ 0x81C, 0xF10C0703,
+ 0x81C, 0xF00E0703,
+ 0x81C, 0xEF100703,
+ 0x81C, 0xEE120703,
+ 0x81C, 0xED140703,
+ 0x81C, 0xEC160703,
+ 0x81C, 0xEB180703,
+ 0x81C, 0xEA1A0703,
+ 0x81C, 0xE91C0703,
+ 0x81C, 0xCA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xA4320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xF0180703,
+ 0x81C, 0xEF1A0703,
+ 0x81C, 0xEE1C0703,
+ 0x81C, 0xED1E0703,
+ 0x81C, 0xEC200703,
+ 0x81C, 0xEB220703,
+ 0x81C, 0xEA240703,
+ 0x81C, 0xE9260703,
+ 0x81C, 0xE8280703,
+ 0x81C, 0xE72A0703,
+ 0x81C, 0xE62C0703,
+ 0x81C, 0xE52E0703,
+ 0x81C, 0xE4300703,
+ 0x81C, 0xE3320703,
+ 0x81C, 0xE2340703,
+ 0x81C, 0xC6360703,
+ 0x81C, 0xC5380703,
+ 0x81C, 0xC43A0703,
+ 0x81C, 0xC33C0703,
+ 0x81C, 0xA63E0703,
+ 0x81C, 0xA5400703,
+ 0x81C, 0xA4420703,
+ 0x81C, 0xA3440703,
+ 0x81C, 0xA2460703,
+ 0x81C, 0x84480703,
+ 0x81C, 0x834A0703,
+ 0x81C, 0x824C0703,
+ 0x81C, 0x814E0703,
+ 0x81C, 0x80500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x225A0703,
+ 0x81C, 0x055C0703,
+ 0x81C, 0x045E0703,
+ 0x81C, 0x03600703,
+ 0x81C, 0x02620703,
+ 0x81C, 0x01640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000703,
+ 0x81C, 0xF6020703,
+ 0x81C, 0xF5040703,
+ 0x81C, 0xF4060703,
+ 0x81C, 0xF3080703,
+ 0x81C, 0xF20A0703,
+ 0x81C, 0xF10C0703,
+ 0x81C, 0xF00E0703,
+ 0x81C, 0xEF100703,
+ 0x81C, 0xEE120703,
+ 0x81C, 0xED140703,
+ 0x81C, 0xEC160703,
+ 0x81C, 0xEB180703,
+ 0x81C, 0xEA1A0703,
+ 0x81C, 0xE91C0703,
+ 0x81C, 0xCA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xA4320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xF0160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xE1340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xC23C0703,
+ 0x81C, 0xC13E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x64500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x235A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF9000703,
+ 0x81C, 0xF8020703,
+ 0x81C, 0xF7040703,
+ 0x81C, 0xF6060703,
+ 0x81C, 0xF5080703,
+ 0x81C, 0xF40A0703,
+ 0x81C, 0xF30C0703,
+ 0x81C, 0xF20E0703,
+ 0x81C, 0xF1100703,
+ 0x81C, 0xF0120703,
+ 0x81C, 0xEF140703,
+ 0x81C, 0xEE160703,
+ 0x81C, 0xED180703,
+ 0x81C, 0xEC1A0703,
+ 0x81C, 0xEB1C0703,
+ 0x81C, 0xEA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xC0320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBE000703,
+ 0x81C, 0xBD020703,
+ 0x81C, 0xBC040703,
+ 0x81C, 0xBB060703,
+ 0x81C, 0xBA080703,
+ 0x81C, 0xB90A0703,
+ 0x81C, 0xB80C0703,
+ 0x81C, 0xB70E0703,
+ 0x81C, 0xB6100703,
+ 0x81C, 0xB5120703,
+ 0x81C, 0xB4140703,
+ 0x81C, 0xB3160703,
+ 0x81C, 0xB2180703,
+ 0x81C, 0xB11A0703,
+ 0x81C, 0xB01C0703,
+ 0x81C, 0x921E0703,
+ 0x81C, 0x91200703,
+ 0x81C, 0x90220703,
+ 0x81C, 0x8F240703,
+ 0x81C, 0x8E260703,
+ 0x81C, 0x8D280703,
+ 0x81C, 0x8C2A0703,
+ 0x81C, 0x6F2C0703,
+ 0x81C, 0x6E2E0703,
+ 0x81C, 0x6D300703,
+ 0x81C, 0x6C320703,
+ 0x81C, 0x6B340703,
+ 0x81C, 0x6A360703,
+ 0x81C, 0x69380703,
+ 0x81C, 0x683A0703,
+ 0x81C, 0x673C0703,
+ 0x81C, 0x663E0703,
+ 0x81C, 0x65400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x424C0703,
+ 0x81C, 0x414E0703,
+ 0x81C, 0x40500703,
+ 0x81C, 0x06520703,
+ 0x81C, 0x05540703,
+ 0x81C, 0x04560703,
+ 0x81C, 0x03580703,
+ 0x81C, 0x025A0703,
+ 0x81C, 0x015C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xF0160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xC23C0703,
+ 0x81C, 0xC13E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x43540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x235A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000703,
+ 0x81C, 0xF7020703,
+ 0x81C, 0xF6040703,
+ 0x81C, 0xF5060703,
+ 0x81C, 0xF4080703,
+ 0x81C, 0xF30A0703,
+ 0x81C, 0xF20C0703,
+ 0x81C, 0xF10E0703,
+ 0x81C, 0xF0100703,
+ 0x81C, 0xEF120703,
+ 0x81C, 0xEE140703,
+ 0x81C, 0xED160703,
+ 0x81C, 0xEC180703,
+ 0x81C, 0xEB1A0703,
+ 0x81C, 0xEA1C0703,
+ 0x81C, 0xE91E0703,
+ 0x81C, 0xCA200703,
+ 0x81C, 0xC9220703,
+ 0x81C, 0xC8240703,
+ 0x81C, 0xC7260703,
+ 0x81C, 0xC6280703,
+ 0x81C, 0xC52A0703,
+ 0x81C, 0xC42C0703,
+ 0x81C, 0xC32E0703,
+ 0x81C, 0xC2300703,
+ 0x81C, 0xC1320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x65420703,
+ 0x81C, 0x64440703,
+ 0x81C, 0x63460703,
+ 0x81C, 0x62480703,
+ 0x81C, 0x614A0703,
+ 0x81C, 0x424C0703,
+ 0x81C, 0x414E0703,
+ 0x81C, 0x40500703,
+ 0x81C, 0x22520703,
+ 0x81C, 0x21540703,
+ 0x81C, 0x20560703,
+ 0x81C, 0x04580703,
+ 0x81C, 0x035A0703,
+ 0x81C, 0x025C0703,
+ 0x81C, 0x015E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF8000703,
+ 0x81C, 0xF7020703,
+ 0x81C, 0xF6040703,
+ 0x81C, 0xF5060703,
+ 0x81C, 0xF4080703,
+ 0x81C, 0xF30A0703,
+ 0x81C, 0xF20C0703,
+ 0x81C, 0xF10E0703,
+ 0x81C, 0xF0100703,
+ 0x81C, 0xEF120703,
+ 0x81C, 0xEE140703,
+ 0x81C, 0xED160703,
+ 0x81C, 0xEC180703,
+ 0x81C, 0xEB1A0703,
+ 0x81C, 0xEA1C0703,
+ 0x81C, 0xE91E0703,
+ 0x81C, 0xCA200703,
+ 0x81C, 0xC9220703,
+ 0x81C, 0xC8240703,
+ 0x81C, 0xC7260703,
+ 0x81C, 0xC6280703,
+ 0x81C, 0xC52A0703,
+ 0x81C, 0xC42C0703,
+ 0x81C, 0xC32E0703,
+ 0x81C, 0xC2300703,
+ 0x81C, 0xC1320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x234C0703,
+ 0x81C, 0x224E0703,
+ 0x81C, 0x21500703,
+ 0x81C, 0x20520703,
+ 0x81C, 0x06540703,
+ 0x81C, 0x05560703,
+ 0x81C, 0x04580703,
+ 0x81C, 0x035A0703,
+ 0x81C, 0x025C0703,
+ 0x81C, 0x015E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xF0180703,
+ 0x81C, 0xEF1A0703,
+ 0x81C, 0xEE1C0703,
+ 0x81C, 0xED1E0703,
+ 0x81C, 0xEC200703,
+ 0x81C, 0xEB220703,
+ 0x81C, 0xEA240703,
+ 0x81C, 0xE9260703,
+ 0x81C, 0xE8280703,
+ 0x81C, 0xE72A0703,
+ 0x81C, 0xE62C0703,
+ 0x81C, 0xE52E0703,
+ 0x81C, 0xE4300703,
+ 0x81C, 0xE3320703,
+ 0x81C, 0xE2340703,
+ 0x81C, 0xC6360703,
+ 0x81C, 0xC5380703,
+ 0x81C, 0xC43A0703,
+ 0x81C, 0xC33C0703,
+ 0x81C, 0xA63E0703,
+ 0x81C, 0xA5400703,
+ 0x81C, 0xA4420703,
+ 0x81C, 0xA3440703,
+ 0x81C, 0xA2460703,
+ 0x81C, 0x84480703,
+ 0x81C, 0x834A0703,
+ 0x81C, 0x824C0703,
+ 0x81C, 0x814E0703,
+ 0x81C, 0x80500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x225A0703,
+ 0x81C, 0x055C0703,
+ 0x81C, 0x045E0703,
+ 0x81C, 0x03600703,
+ 0x81C, 0x02620703,
+ 0x81C, 0x01640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xEF160703,
+ 0x81C, 0xEE180703,
+ 0x81C, 0xED1A0703,
+ 0x81C, 0xEC1C0703,
+ 0x81C, 0xEB1E0703,
+ 0x81C, 0xEA200703,
+ 0x81C, 0xE9220703,
+ 0x81C, 0xE8240703,
+ 0x81C, 0xE7260703,
+ 0x81C, 0xE6280703,
+ 0x81C, 0xE52A0703,
+ 0x81C, 0xE42C0703,
+ 0x81C, 0xE32E0703,
+ 0x81C, 0xE2300703,
+ 0x81C, 0xE1320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xEF160703,
+ 0x81C, 0xEE180703,
+ 0x81C, 0xED1A0703,
+ 0x81C, 0xEC1C0703,
+ 0x81C, 0xEB1E0703,
+ 0x81C, 0xEA200703,
+ 0x81C, 0xE9220703,
+ 0x81C, 0xE8240703,
+ 0x81C, 0xE7260703,
+ 0x81C, 0xE6280703,
+ 0x81C, 0xE52A0703,
+ 0x81C, 0xE42C0703,
+ 0x81C, 0xE32E0703,
+ 0x81C, 0xE2300703,
+ 0x81C, 0xE1320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0xA1460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x42560703,
+ 0x81C, 0x41580703,
+ 0x81C, 0x405A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x04620703,
+ 0x81C, 0x03640703,
+ 0x81C, 0x02660703,
+ 0x81C, 0x01680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x9000000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xBF000703,
+ 0x81C, 0xBF020703,
+ 0x81C, 0xBF040703,
+ 0x81C, 0xBF060703,
+ 0x81C, 0xBF080703,
+ 0x81C, 0xBE0A0703,
+ 0x81C, 0xBD0C0703,
+ 0x81C, 0xBC0E0703,
+ 0x81C, 0xBB100703,
+ 0x81C, 0xBA120703,
+ 0x81C, 0xB9140703,
+ 0x81C, 0xB8160703,
+ 0x81C, 0xB7180703,
+ 0x81C, 0xB61A0703,
+ 0x81C, 0xB51C0703,
+ 0x81C, 0xB41E0703,
+ 0x81C, 0xB1200703,
+ 0x81C, 0xB2220703,
+ 0x81C, 0xB1240703,
+ 0x81C, 0xB0260703,
+ 0x81C, 0xAF280703,
+ 0x81C, 0xAE2A0703,
+ 0x81C, 0xAD2C0703,
+ 0x81C, 0xAC2E0703,
+ 0x81C, 0xAB300703,
+ 0x81C, 0xAA320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0x883C0703,
+ 0x81C, 0x873E0703,
+ 0x81C, 0x86400703,
+ 0x81C, 0x85420703,
+ 0x81C, 0x84440703,
+ 0x81C, 0x83460703,
+ 0x81C, 0x67480703,
+ 0x81C, 0x664A0703,
+ 0x81C, 0x654C0703,
+ 0x81C, 0x644E0703,
+ 0x81C, 0x27500703,
+ 0x81C, 0x26520703,
+ 0x81C, 0x25540703,
+ 0x81C, 0x24560703,
+ 0x81C, 0x23580703,
+ 0x81C, 0x225A0703,
+ 0x81C, 0x215C0703,
+ 0x81C, 0x205E0703,
+ 0x81C, 0x03600703,
+ 0x81C, 0x02620703,
+ 0x81C, 0x01640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0x90000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xFC000403,
+ 0x81C, 0xFB000703,
+ 0x81C, 0xFA020703,
+ 0x81C, 0xF9040703,
+ 0x81C, 0xF8060703,
+ 0x81C, 0xF7080703,
+ 0x81C, 0xF60A0703,
+ 0x81C, 0xF50C0703,
+ 0x81C, 0xF40E0703,
+ 0x81C, 0xF3100703,
+ 0x81C, 0xF2120703,
+ 0x81C, 0xF1140703,
+ 0x81C, 0xF0160703,
+ 0x81C, 0xEF180703,
+ 0x81C, 0xEE1A0703,
+ 0x81C, 0xED1C0703,
+ 0x81C, 0xEC1E0703,
+ 0x81C, 0xEB200703,
+ 0x81C, 0xEA220703,
+ 0x81C, 0xE9240703,
+ 0x81C, 0xE8260703,
+ 0x81C, 0xE7280703,
+ 0x81C, 0xE62A0703,
+ 0x81C, 0xE52C0703,
+ 0x81C, 0xE42E0703,
+ 0x81C, 0xE3300703,
+ 0x81C, 0xE2320703,
+ 0x81C, 0xC6340703,
+ 0x81C, 0xC5360703,
+ 0x81C, 0xC4380703,
+ 0x81C, 0xC33A0703,
+ 0x81C, 0xA63C0703,
+ 0x81C, 0xA53E0703,
+ 0x81C, 0xA4400703,
+ 0x81C, 0xA3420703,
+ 0x81C, 0xA2440703,
+ 0x81C, 0x84460703,
+ 0x81C, 0x83480703,
+ 0x81C, 0x824A0703,
+ 0x81C, 0x814C0703,
+ 0x81C, 0x804E0703,
+ 0x81C, 0x63500703,
+ 0x81C, 0x62520703,
+ 0x81C, 0x61540703,
+ 0x81C, 0x60560703,
+ 0x81C, 0x22580703,
+ 0x81C, 0x055A0703,
+ 0x81C, 0x045C0703,
+ 0x81C, 0x035E0703,
+ 0x81C, 0x02600703,
+ 0x81C, 0x01620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x90000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x81C, 0xF7000703,
+ 0x81C, 0xF6020703,
+ 0x81C, 0xF5040703,
+ 0x81C, 0xF4060703,
+ 0x81C, 0xF3080703,
+ 0x81C, 0xF20A0703,
+ 0x81C, 0xF10C0703,
+ 0x81C, 0xF00E0703,
+ 0x81C, 0xEF100703,
+ 0x81C, 0xEE120703,
+ 0x81C, 0xED140703,
+ 0x81C, 0xEC160703,
+ 0x81C, 0xEB180703,
+ 0x81C, 0xEA1A0703,
+ 0x81C, 0xE91C0703,
+ 0x81C, 0xCA1E0703,
+ 0x81C, 0xC9200703,
+ 0x81C, 0xC8220703,
+ 0x81C, 0xC7240703,
+ 0x81C, 0xC6260703,
+ 0x81C, 0xC5280703,
+ 0x81C, 0xC42A0703,
+ 0x81C, 0xC32C0703,
+ 0x81C, 0xC22E0703,
+ 0x81C, 0xC1300703,
+ 0x81C, 0xA4320703,
+ 0x81C, 0xA3340703,
+ 0x81C, 0xA2360703,
+ 0x81C, 0xA1380703,
+ 0x81C, 0xA03A0703,
+ 0x81C, 0x823C0703,
+ 0x81C, 0x813E0703,
+ 0x81C, 0x80400703,
+ 0x81C, 0x64420703,
+ 0x81C, 0x63440703,
+ 0x81C, 0x62460703,
+ 0x81C, 0x61480703,
+ 0x81C, 0x604A0703,
+ 0x81C, 0x414C0703,
+ 0x81C, 0x404E0703,
+ 0x81C, 0x22500703,
+ 0x81C, 0x21520703,
+ 0x81C, 0x20540703,
+ 0x81C, 0x03560703,
+ 0x81C, 0x02580703,
+ 0x81C, 0x015A0703,
+ 0x81C, 0x005C0703,
+ 0x81C, 0x005E0703,
+ 0x81C, 0x00600703,
+ 0x81C, 0x00620703,
+ 0x81C, 0x00640703,
+ 0x81C, 0x00660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0xA0000000, 0x00000000,
+ 0x81C, 0xFC000703,
+ 0x81C, 0xFB020703,
+ 0x81C, 0xFA040703,
+ 0x81C, 0xF9060703,
+ 0x81C, 0xF8080703,
+ 0x81C, 0xF70A0703,
+ 0x81C, 0xF60C0703,
+ 0x81C, 0xF50E0703,
+ 0x81C, 0xF4100703,
+ 0x81C, 0xF3120703,
+ 0x81C, 0xF2140703,
+ 0x81C, 0xF1160703,
+ 0x81C, 0xF0180703,
+ 0x81C, 0xEF1A0703,
+ 0x81C, 0xEE1C0703,
+ 0x81C, 0xED1E0703,
+ 0x81C, 0xEC200703,
+ 0x81C, 0xEB220703,
+ 0x81C, 0xEA240703,
+ 0x81C, 0xE9260703,
+ 0x81C, 0xE8280703,
+ 0x81C, 0xE72A0703,
+ 0x81C, 0xE62C0703,
+ 0x81C, 0xE52E0703,
+ 0x81C, 0xE4300703,
+ 0x81C, 0xE3320703,
+ 0x81C, 0xE2340703,
+ 0x81C, 0xC6360703,
+ 0x81C, 0xC5380703,
+ 0x81C, 0xC43A0703,
+ 0x81C, 0xC33C0703,
+ 0x81C, 0xA63E0703,
+ 0x81C, 0xA5400703,
+ 0x81C, 0xA4420703,
+ 0x81C, 0xA3440703,
+ 0x81C, 0xA2460703,
+ 0x81C, 0x84480703,
+ 0x81C, 0x834A0703,
+ 0x81C, 0x824C0703,
+ 0x81C, 0x814E0703,
+ 0x81C, 0x80500703,
+ 0x81C, 0x63520703,
+ 0x81C, 0x62540703,
+ 0x81C, 0x61560703,
+ 0x81C, 0x60580703,
+ 0x81C, 0x235A0703,
+ 0x81C, 0x225C0703,
+ 0x81C, 0x215E0703,
+ 0x81C, 0x20600703,
+ 0x81C, 0x03620703,
+ 0x81C, 0x02640703,
+ 0x81C, 0x01660703,
+ 0x81C, 0x00680703,
+ 0x81C, 0x006A0703,
+ 0x81C, 0x006C0703,
+ 0x81C, 0x006E0703,
+ 0x81C, 0x00700703,
+ 0x81C, 0x00720703,
+ 0x81C, 0x00740703,
+ 0x81C, 0x00760703,
+ 0x81C, 0x00780703,
+ 0x81C, 0x007A0703,
+ 0x81C, 0x007C0703,
+ 0x81C, 0x007E0703,
+ 0x81C, 0x007E0703,
+ 0xB0000000, 0x00000000,
+ 0x80000000, 0x00000000, 0x40000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0x9000000d, 0x00000000, 0x40000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0x9000000e, 0x00000000, 0x40000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0xA0000000, 0x00000000,
+ 0xC50, 0x00000022,
+ 0xC50, 0x00000020,
+ 0xE50, 0x00000022,
+ 0xE50, 0x00000020,
+ 0xB0000000, 0x00000000,
+
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822b_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8822b_bb[] = {
+ 0x800, 0x9020D010,
+ 0x804, 0x800181A0,
+ 0x808, 0x0E028233,
+ 0x80C, 0x10000013,
+ 0x810, 0x22101243,
+ 0x814, 0x020C3D11,
+ 0x818, 0x84A10385,
+ 0x81C, 0x1E1E081F,
+ 0x820, 0x0001AAAA,
+ 0x824, 0x00030FE0,
+ 0x828, 0x0000CCCC,
+ 0x82C, 0x75CB7010,
+ 0x830, 0x79A0EAAA,
+ 0x834, 0x072E6986,
+ 0x838, 0x87766441,
+ 0x83C, 0x9194B2B7,
+ 0x840, 0x171750E0,
+ 0x844, 0x4D3D7CDB,
+ 0x848, 0x4AD0408B,
+ 0x84C, 0x6AFBF7A5,
+ 0x850, 0x28A74706,
+ 0x854, 0x0001520C,
+ 0x858, 0x4060C000,
+ 0x85C, 0x74010160,
+ 0x860, 0x68A7C321,
+ 0x864, 0x79F27032,
+ 0x868, 0x8CA7A314,
+ 0x86C, 0x778C2878,
+ 0x870, 0x77777777,
+ 0x874, 0x27612C2E,
+ 0x878, 0xC0003152,
+ 0x87C, 0x5C8FC000,
+ 0x880, 0x00000000,
+ 0x884, 0x00000000,
+ 0x888, 0x00000000,
+ 0x88C, 0x00000000,
+ 0x890, 0x00000000,
+ 0x894, 0x00000000,
+ 0x898, 0x00000000,
+ 0x89C, 0x00000000,
+ 0x8A0, 0x00000013,
+ 0x8A4, 0x7F7F7F7F,
+ 0x8A8, 0x2202033E,
+ 0x8AC, 0xF00F000A,
+ 0x8B0, 0x00000600,
+ 0x8B4, 0x000FC080,
+ 0x8B8, 0xEC0057F7,
+ 0x8BC, 0xACB520A3,
+ 0x8C0, 0xFFE04020,
+ 0x8C4, 0x47C00000,
+ 0x8C8, 0x000251A5,
+ 0x8CC, 0x08108492,
+ 0x8D0, 0x0000B800,
+ 0x8D4, 0x860308A0,
+ 0x8D8, 0x29095612,
+ 0x8DC, 0x00000000,
+ 0x8E0, 0x32D16777,
+ 0x8E4, 0x4C098935,
+ 0x8E8, 0xFFFFC42C,
+ 0x8EC, 0x99999999,
+ 0x8F0, 0x00009999,
+ 0x8F4, 0x00D80FA1,
+ 0x8F8, 0x40000080,
+ 0x8FC, 0x00000130,
+ 0x900, 0x00800000,
+ 0x904, 0x00000000,
+ 0x908, 0x00000000,
+ 0x90C, 0xD3000000,
+ 0x910, 0x0000FC00,
+ 0x914, 0xC6380000,
+ 0x918, 0x1C1028C0,
+ 0x91C, 0x64B11A1C,
+ 0x920, 0xE0767233,
+ 0x924, 0x855A2500,
+ 0x928, 0x4AB0E4E4,
+ 0x92C, 0xFFFEB200,
+ 0x930, 0xFFFFFFFE,
+ 0x934, 0x001FFFFF,
+ 0x938, 0x00008480,
+ 0x93C, 0xE41C0642,
+ 0x940, 0x0E470430,
+ 0x944, 0x00000000,
+ 0x948, 0xAC000000,
+ 0x94C, 0x10000083,
+ 0x950, 0x32010080,
+ 0x954, 0x84510080,
+ 0x958, 0x00000001,
+ 0x95C, 0x04248000,
+ 0x960, 0x00000000,
+ 0x964, 0x00000000,
+ 0x968, 0x00000000,
+ 0x96C, 0x00000000,
+ 0x970, 0x00001FFF,
+ 0x974, 0x44000FFF,
+ 0x978, 0x00000000,
+ 0x97C, 0x00000000,
+ 0x980, 0x00000000,
+ 0x984, 0x00000000,
+ 0x988, 0x00000000,
+ 0x98C, 0x43440000,
+ 0x990, 0x27100000,
+ 0x994, 0xFFFF0100,
+ 0x998, 0xFFFFFF5C,
+ 0x99C, 0xFFFFFFFF,
+ 0x9A0, 0x000000FF,
+ 0x9A4, 0x80000088,
+ 0x9A8, 0x0C2F0000,
+ 0x9AC, 0x01560000,
+ 0x9B0, 0x70000000,
+ 0x9B4, 0x00000000,
+ 0x9B8, 0x00000000,
+ 0x9BC, 0x00000000,
+ 0x9C0, 0x00000000,
+ 0x9C4, 0x00000000,
+ 0x9C8, 0x00000000,
+ 0x9CC, 0x00000000,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E0, 0x00000000,
+ 0x9E4, 0x02000402,
+ 0x9E8, 0x000022D4,
+ 0x9EC, 0x00000000,
+ 0x9F0, 0x00010080,
+ 0x9F4, 0x00000000,
+ 0x9F8, 0x00000000,
+ 0x9FC, 0xEFFFF7F7,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x81FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E20100F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x84880000,
+ 0xA24, 0x384F6577,
+ 0xA28, 0x00001525,
+ 0xA2C, 0x00920000,
+ 0xA70, 0x101FFF00,
+ 0xA74, 0x00000148,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218675B2,
+ 0xA84, 0x80208C00,
+ 0xA88, 0x040C0000,
+ 0xA8C, 0x12345678,
+ 0xA90, 0xABCDEF00,
+ 0xA94, 0x001B1B89,
+ 0xA98, 0x030A0000,
+ 0xA9C, 0x00060000,
+ 0xAA0, 0x00000000,
+ 0xAA4, 0x0004000F,
+ 0xAA8, 0x00000200,
+ 0xB00, 0xE1000440,
+ 0xB04, 0x00800000,
+ 0xB08, 0xFF02030B,
+ 0xB0C, 0x01EAA406,
+ 0xB10, 0x00030690,
+ 0xB14, 0x006000FA,
+ 0xB18, 0x00000002,
+ 0xB1C, 0x00000002,
+ 0xB20, 0x4B00001F,
+ 0xB24, 0x4E8E3E40,
+ 0xB28, 0x03020100,
+ 0xB2C, 0x07060504,
+ 0xB30, 0x0B0A0908,
+ 0xB34, 0x0F0E0D0C,
+ 0xB38, 0x13121110,
+ 0xB3C, 0x0000003A,
+ 0xB40, 0x00000000,
+ 0xB44, 0x80000000,
+ 0xB48, 0x3F0000FA,
+ 0xB4C, 0x88C80020,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00004241,
+ 0xB58, 0xE0008208,
+ 0xB5C, 0x41EFFFF9,
+ 0xB60, 0x00000000,
+ 0xB64, 0x00200063,
+ 0xB68, 0x0000003A,
+ 0xB6C, 0x00000102,
+ 0xB70, 0x4E6D1870,
+ 0xB74, 0x03020100,
+ 0xB78, 0x07060504,
+ 0xB7C, 0x0B0A0908,
+ 0xB80, 0x0F0E0D0C,
+ 0xB84, 0x13121110,
+ 0xB88, 0x00000000,
+ 0xB8C, 0x00000000,
+ 0xC00, 0x00000007,
+ 0xC04, 0x00000020,
+ 0xC08, 0x60403231,
+ 0xC0C, 0x00012345,
+ 0xC10, 0x00000100,
+ 0xC14, 0x01000000,
+ 0xC18, 0x00000000,
+ 0xC1C, 0x40040053,
+ 0xC20, 0x40020103,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x00000000,
+ 0xC34, 0x00000000,
+ 0xC38, 0x00000000,
+ 0xC3C, 0x00000000,
+ 0xC40, 0x00000000,
+ 0xC44, 0x00000000,
+ 0xC48, 0x00000000,
+ 0xC4C, 0x00000000,
+ 0xC50, 0x00000020,
+ 0xC54, 0x00000000,
+ 0xC58, 0xD8020402,
+ 0xC5C, 0xDE000120,
+ 0xC68, 0x5979993F,
+ 0xC6C, 0x0000122A,
+ 0xC70, 0x99795979,
+ 0xC74, 0x99795979,
+ 0xC78, 0x99799979,
+ 0xC7C, 0x99791979,
+ 0xC80, 0x19791979,
+ 0xC84, 0x19791979,
+ 0xC88, 0x00000000,
+ 0xC8C, 0x07000000,
+ 0xC94, 0x01000100,
+ 0xC98, 0x201C8000,
+ 0xC9C, 0x00000000,
+ 0xCA0, 0x0000A555,
+ 0xCA4, 0x08040201,
+ 0xCA8, 0x80402010,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x77777777,
+ 0xCB4, 0x00007777,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x00000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x00000000,
+ 0xCDC, 0x00000000,
+ 0xCE0, 0x00000000,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x00000000,
+ 0xCEC, 0x00000000,
+ 0xE00, 0x00000007,
+ 0xE04, 0x00000020,
+ 0xE08, 0x60403231,
+ 0xE0C, 0x00012345,
+ 0xE10, 0x00000100,
+ 0xE14, 0x01000000,
+ 0xE18, 0x00000000,
+ 0xE1C, 0x40040053,
+ 0xE20, 0x40020103,
+ 0xE24, 0x00000000,
+ 0xE28, 0x00000000,
+ 0xE2C, 0x00000000,
+ 0xE30, 0x00000000,
+ 0xE34, 0x00000000,
+ 0xE38, 0x00000000,
+ 0xE3C, 0x00000000,
+ 0xE40, 0x00000000,
+ 0xE44, 0x00000000,
+ 0xE48, 0x00000000,
+ 0xE4C, 0x00000000,
+ 0xE50, 0x00000020,
+ 0xE54, 0x00000000,
+ 0xE58, 0xD8120402,
+ 0xE5C, 0xDE000120,
+ 0xE68, 0x5979993F,
+ 0xE6C, 0x0000122A,
+ 0xE70, 0x99795979,
+ 0xE74, 0x99795979,
+ 0xE78, 0x99799979,
+ 0xE7C, 0x99791979,
+ 0xE80, 0x19791979,
+ 0xE84, 0x19791979,
+ 0xE88, 0x00000000,
+ 0xE8C, 0x07000000,
+ 0xE94, 0x01000100,
+ 0xE98, 0x201C8000,
+ 0xE9C, 0x00000000,
+ 0xEA0, 0x0000A555,
+ 0xEA4, 0x08040201,
+ 0xEA8, 0x80402010,
+ 0xEAC, 0x00000000,
+ 0xEB0, 0x77777777,
+ 0xEB4, 0x00007777,
+ 0xEB8, 0x00000000,
+ 0xEBC, 0x00000000,
+ 0xEC0, 0x00000000,
+ 0xEC4, 0x00000000,
+ 0xEC8, 0x00000000,
+ 0xECC, 0x00000000,
+ 0xED0, 0x00000000,
+ 0xED4, 0x00000000,
+ 0xED8, 0x00000000,
+ 0xEDC, 0x00000000,
+ 0xEE0, 0x00000000,
+ 0xEE4, 0x00000000,
+ 0xEE8, 0x00000000,
+ 0xEEC, 0x00000000,
+ 0x1900, 0x00000000,
+ 0x1904, 0x00238000,
+ 0x1908, 0x00000000,
+ 0x190C, 0x00000000,
+ 0x1910, 0x00000000,
+ 0x1914, 0x00000000,
+ 0x1918, 0x00000000,
+ 0x191C, 0x00000000,
+ 0x1920, 0x00000000,
+ 0x1924, 0x00000000,
+ 0x1928, 0x00000000,
+ 0x192C, 0x00000000,
+ 0x1930, 0x00000000,
+ 0x1934, 0x00000000,
+ 0x1938, 0x00000000,
+ 0x193C, 0x00000000,
+ 0x1940, 0x00000000,
+ 0x1944, 0x00000000,
+ 0x1948, 0x00000000,
+ 0x194C, 0x00000000,
+ 0x1950, 0x00000000,
+ 0x1954, 0x00000000,
+ 0x1958, 0x00000000,
+ 0x195C, 0x00000000,
+ 0x1960, 0x00000000,
+ 0x1964, 0x00000000,
+ 0x1968, 0x00000000,
+ 0x196C, 0x00000000,
+ 0x1970, 0x00000000,
+ 0x1974, 0x00000000,
+ 0x1978, 0x00000000,
+ 0x197C, 0x00000000,
+ 0x1980, 0x00000000,
+ 0x1984, 0x03000000,
+ 0x1988, 0x21401E88,
+ 0x198C, 0x00004000,
+ 0x1990, 0x00000000,
+ 0x1994, 0x00000000,
+ 0x1998, 0x00000053,
+ 0x199C, 0x00000000,
+ 0x19A0, 0x00000000,
+ 0x19A4, 0x00000000,
+ 0x19A8, 0x00000000,
+ 0x19AC, 0x0E47E47F,
+ 0x19B0, 0x00000000,
+ 0x19B4, 0x0E47E47F,
+ 0x19B8, 0x00000000,
+ 0x19BC, 0x00000000,
+ 0x19C0, 0x00000000,
+ 0x19C4, 0x00000000,
+ 0x19C8, 0x00000000,
+ 0x19CC, 0x00000000,
+ 0x19D0, 0x00000000,
+ 0x19D4, 0xAAAAAAAA,
+ 0x19D8, 0x00000AAA,
+ 0x19DC, 0x133E0F37,
+ 0x19E0, 0x00000000,
+ 0x19E4, 0x00000000,
+ 0x19E8, 0x00000000,
+ 0x19EC, 0x00000000,
+ 0x19F0, 0x00000000,
+ 0x19F4, 0x00000000,
+ 0x19F8, 0x01A00000,
+ 0x19FC, 0x00000000,
+ 0x1C00, 0x00000100,
+ 0x1C04, 0x01000000,
+ 0x1C08, 0x00000100,
+ 0x1C0C, 0x01000000,
+ 0x1C10, 0x00000100,
+ 0x1C14, 0x01000000,
+ 0x1C18, 0x00000100,
+ 0x1C1C, 0x01000000,
+ 0x1C20, 0x00000100,
+ 0x1C24, 0x01000000,
+ 0x1C28, 0x00000100,
+ 0x1C2C, 0x01000000,
+ 0x1C30, 0x00000100,
+ 0x1C34, 0x01000000,
+ 0x1C38, 0x00000000,
+ 0x1C3C, 0x00000000,
+ 0x1C40, 0x000C0100,
+ 0x1C44, 0x000000F3,
+ 0x1C48, 0x1A8249A8,
+ 0x1C4C, 0x1461C826,
+ 0x1C50, 0x0001469E,
+ 0x1C54, 0x58D158D1,
+ 0x1C58, 0x04490088,
+ 0x1C5C, 0x04004400,
+ 0x1C60, 0x00000000,
+ 0x1C64, 0x04004400,
+ 0x1C68, 0x00000100,
+ 0x1C6C, 0x01000000,
+ 0x1C70, 0x00000100,
+ 0x1C74, 0x01000000,
+ 0x1C78, 0x00000000,
+ 0x1C7C, 0x00000010,
+ 0x1C80, 0x5FFF5FFF,
+ 0x1C84, 0x5FFF5FFF,
+ 0x1C88, 0x5FFF5FFF,
+ 0x1C8C, 0x5FFF5FFF,
+ 0x1C90, 0x5FFF5FFF,
+ 0x1C94, 0x5FFF5FFF,
+ 0x1C98, 0x5FFF5FFF,
+ 0x1C9C, 0x5FFF5FFF,
+ 0x1CA0, 0x00000100,
+ 0x1CA4, 0x01000000,
+ 0x1CA8, 0x00000100,
+ 0x1CAC, 0x5FFF5FFF,
+ 0x1CB0, 0x00000100,
+ 0x1CB4, 0x01000000,
+ 0x1CB8, 0x00000000,
+ 0x1CBC, 0x00000000,
+ 0x1CC0, 0x00000100,
+ 0x1CC4, 0x01000000,
+ 0x1CC8, 0x00000100,
+ 0x1CCC, 0x01000000,
+ 0x1CD0, 0x00000100,
+ 0x1CD4, 0x01000000,
+ 0x1CD8, 0x00000100,
+ 0x1CDC, 0x01000000,
+ 0x1CE0, 0x00000100,
+ 0x1CE4, 0x01000000,
+ 0x1CE8, 0x00000100,
+ 0x1CEC, 0x01000000,
+ 0x1CF0, 0x00000100,
+ 0x1CF4, 0x01000000,
+ 0x1CF8, 0x00000000,
+ 0x1CFC, 0x00000000,
+ 0xC60, 0x70038040,
+ 0xC60, 0x70038040,
+ 0xC60, 0x70146040,
+ 0xC60, 0x70246040,
+ 0xC60, 0x70346040,
+ 0xC60, 0x70446040,
+ 0xC60, 0x70532040,
+ 0xC60, 0x70646040,
+ 0xC60, 0x70738040,
+ 0xC60, 0x70838040,
+ 0xC60, 0x70938040,
+ 0xC60, 0x70A38040,
+ 0xC60, 0x70B36040,
+ 0xC60, 0x70C06040,
+ 0xC60, 0x70D06040,
+ 0xC60, 0x70E76040,
+ 0xC60, 0x70F06040,
+ 0xE60, 0x70038040,
+ 0xE60, 0x70038040,
+ 0xE60, 0x70146040,
+ 0xE60, 0x70246040,
+ 0xE60, 0x70346040,
+ 0xE60, 0x70446040,
+ 0xE60, 0x70532040,
+ 0xE60, 0x70646040,
+ 0xE60, 0x70738040,
+ 0xE60, 0x70838040,
+ 0xE60, 0x70938040,
+ 0xE60, 0x70A38040,
+ 0xE60, 0x70B36040,
+ 0xE60, 0x70C06040,
+ 0xE60, 0x70D06040,
+ 0xE60, 0x70E76040,
+ 0xE60, 0x70F06040,
+ 0xC64, 0x00800000,
+ 0xC64, 0x08800001,
+ 0xC64, 0x00800002,
+ 0xC64, 0x00800003,
+ 0xC64, 0x00800004,
+ 0xC64, 0x00800005,
+ 0xC64, 0x00800006,
+ 0xC64, 0x08800007,
+ 0xC64, 0x00004000,
+ 0xE64, 0x00800000,
+ 0xE64, 0x08800001,
+ 0xE64, 0x00800002,
+ 0xE64, 0x00800003,
+ 0xE64, 0x00800004,
+ 0xE64, 0x00800005,
+ 0xE64, 0x00800006,
+ 0xE64, 0x08800007,
+ 0xE64, 0x00004000,
+ 0x1B00, 0xF8000008,
+ 0x1B00, 0xF80A7008,
+ 0x1B00, 0xF8015008,
+ 0x1B00, 0xF8000008,
+ 0x1B04, 0xE24629D2,
+ 0x1B08, 0x00000080,
+ 0x1B0C, 0x00000000,
+ 0x1B10, 0x00011C00,
+ 0x1B14, 0x00000000,
+ 0x1B18, 0x00292903,
+ 0x1B1C, 0xA2193C32,
+ 0x1B20, 0x01840008,
+ 0x1B24, 0x01860008,
+ 0x1B28, 0x80060300,
+ 0x1B2C, 0x00000003,
+ 0x1B30, 0x20000000,
+ 0x1B34, 0x00000800,
+ 0x1B3C, 0x20000000,
+ 0x1BC0, 0x01000000,
+ 0x1BCC, 0x00000000,
+ 0x1B00, 0xF800000A,
+ 0x1B1C, 0xA2193C32,
+ 0x1B20, 0x01840008,
+ 0x1B24, 0x01860008,
+ 0x1B28, 0x80060300,
+ 0x1B2C, 0x00000003,
+ 0x1B30, 0x20000000,
+ 0x1B34, 0x00000800,
+ 0x1B3C, 0x20000000,
+ 0x1BC0, 0x01000000,
+ 0x1BCC, 0x00000000,
+ 0x1B00, 0xF8000000,
+ 0x1B80, 0x00000007,
+ 0x1B80, 0x090A0005,
+ 0x1B80, 0x090A0007,
+ 0x1B80, 0x0FFE0015,
+ 0x1B80, 0x0FFE0017,
+ 0x1B80, 0x00220025,
+ 0x1B80, 0x00220027,
+ 0x1B80, 0x00040035,
+ 0x1B80, 0x00040037,
+ 0x1B80, 0x05C00045,
+ 0x1B80, 0x05C00047,
+ 0x1B80, 0x00070055,
+ 0x1B80, 0x00070057,
+ 0x1B80, 0x64000065,
+ 0x1B80, 0x64000067,
+ 0x1B80, 0x00020075,
+ 0x1B80, 0x00020077,
+ 0x1B80, 0x00080085,
+ 0x1B80, 0x00080087,
+ 0x1B80, 0x80000095,
+ 0x1B80, 0x80000097,
+ 0x1B80, 0x090800A5,
+ 0x1B80, 0x090800A7,
+ 0x1B80, 0x0F0200B5,
+ 0x1B80, 0x0F0200B7,
+ 0x1B80, 0x002200C5,
+ 0x1B80, 0x002200C7,
+ 0x1B80, 0x000400D5,
+ 0x1B80, 0x000400D7,
+ 0x1B80, 0x05C000E5,
+ 0x1B80, 0x05C000E7,
+ 0x1B80, 0x000700F5,
+ 0x1B80, 0x000700F7,
+ 0x1B80, 0x64020105,
+ 0x1B80, 0x64020107,
+ 0x1B80, 0x00020115,
+ 0x1B80, 0x00020117,
+ 0x1B80, 0x00040125,
+ 0x1B80, 0x00040127,
+ 0x1B80, 0x4A000135,
+ 0x1B80, 0x4A000137,
+ 0x1B80, 0x4B040145,
+ 0x1B80, 0x4B040147,
+ 0x1B80, 0x85030155,
+ 0x1B80, 0x85030157,
+ 0x1B80, 0x40090165,
+ 0x1B80, 0x40090167,
+ 0x1B80, 0xE0280175,
+ 0x1B80, 0xE0280177,
+ 0x1B80, 0x4B050185,
+ 0x1B80, 0x4B050187,
+ 0x1B80, 0x86030195,
+ 0x1B80, 0x86030197,
+ 0x1B80, 0x400B01A5,
+ 0x1B80, 0x400B01A7,
+ 0x1B80, 0xE02801B5,
+ 0x1B80, 0xE02801B7,
+ 0x1B80, 0x4B0001C5,
+ 0x1B80, 0x4B0001C7,
+ 0x1B80, 0x000701D5,
+ 0x1B80, 0x000701D7,
+ 0x1B80, 0x4C0001E5,
+ 0x1B80, 0x4C0001E7,
+ 0x1B80, 0x000401F5,
+ 0x1B80, 0x000401F7,
+ 0x1B80, 0x4D040205,
+ 0x1B80, 0x4D040207,
+ 0x1B80, 0x2EF00215,
+ 0x1B80, 0x2EF00217,
+ 0x1B80, 0x00000225,
+ 0x1B80, 0x00000227,
+ 0x1B80, 0x20810235,
+ 0x1B80, 0x20810237,
+ 0x1B80, 0x23450245,
+ 0x1B80, 0x23450247,
+ 0x1B80, 0x4D000255,
+ 0x1B80, 0x4D000257,
+ 0x1B80, 0x00040265,
+ 0x1B80, 0x00040267,
+ 0x1B80, 0x30000275,
+ 0x1B80, 0x30000277,
+ 0x1B80, 0xE1D80285,
+ 0x1B80, 0xE1D80287,
+ 0x1B80, 0xF0110295,
+ 0x1B80, 0xF0110297,
+ 0x1B80, 0xF11102A5,
+ 0x1B80, 0xF11102A7,
+ 0x1B80, 0xF21102B5,
+ 0x1B80, 0xF21102B7,
+ 0x1B80, 0xF31102C5,
+ 0x1B80, 0xF31102C7,
+ 0x1B80, 0xF41102D5,
+ 0x1B80, 0xF41102D7,
+ 0x1B80, 0xF51102E5,
+ 0x1B80, 0xF51102E7,
+ 0x1B80, 0xF61102F5,
+ 0x1B80, 0xF61102F7,
+ 0x1B80, 0xF7110305,
+ 0x1B80, 0xF7110307,
+ 0x1B80, 0xF8110315,
+ 0x1B80, 0xF8110317,
+ 0x1B80, 0xF9110325,
+ 0x1B80, 0xF9110327,
+ 0x1B80, 0xFA110335,
+ 0x1B80, 0xFA110337,
+ 0x1B80, 0xFB110345,
+ 0x1B80, 0xFB110347,
+ 0x1B80, 0xFC110355,
+ 0x1B80, 0xFC110357,
+ 0x1B80, 0xFD110365,
+ 0x1B80, 0xFD110367,
+ 0x1B80, 0xFE110375,
+ 0x1B80, 0xFE110377,
+ 0x1B80, 0xFF110385,
+ 0x1B80, 0xFF110387,
+ 0x1B80, 0x00010395,
+ 0x1B80, 0x00010397,
+ 0x1B80, 0x305103A5,
+ 0x1B80, 0x305103A7,
+ 0x1B80, 0x306903B5,
+ 0x1B80, 0x306903B7,
+ 0x1B80, 0x30B403C5,
+ 0x1B80, 0x30B403C7,
+ 0x1B80, 0x30B703D5,
+ 0x1B80, 0x30B703D7,
+ 0x1B80, 0x306B03E5,
+ 0x1B80, 0x306B03E7,
+ 0x1B80, 0x307603F5,
+ 0x1B80, 0x307603F7,
+ 0x1B80, 0x30810405,
+ 0x1B80, 0x30810407,
+ 0x1B80, 0x30C10415,
+ 0x1B80, 0x30C10417,
+ 0x1B80, 0x30BB0425,
+ 0x1B80, 0x30BB0427,
+ 0x1B80, 0x30CF0435,
+ 0x1B80, 0x30CF0437,
+ 0x1B80, 0x30DA0445,
+ 0x1B80, 0x30DA0447,
+ 0x1B80, 0x30E50455,
+ 0x1B80, 0x30E50457,
+ 0x1B80, 0x304A0465,
+ 0x1B80, 0x304A0467,
+ 0x1B80, 0x31140475,
+ 0x1B80, 0x31140477,
+ 0x1B80, 0x31250485,
+ 0x1B80, 0x31250487,
+ 0x1B80, 0x313A0495,
+ 0x1B80, 0x313A0497,
+ 0x1B80, 0x4D0404A5,
+ 0x1B80, 0x4D0404A7,
+ 0x1B80, 0x2EF004B5,
+ 0x1B80, 0x2EF004B7,
+ 0x1B80, 0x000004C5,
+ 0x1B80, 0x000004C7,
+ 0x1B80, 0x208104D5,
+ 0x1B80, 0x208104D7,
+ 0x1B80, 0xA3B504E5,
+ 0x1B80, 0xA3B504E7,
+ 0x1B80, 0x4D0004F5,
+ 0x1B80, 0x4D0004F7,
+ 0x1B80, 0x30000505,
+ 0x1B80, 0x30000507,
+ 0x1B80, 0xE1650515,
+ 0x1B80, 0xE1650517,
+ 0x1B80, 0x4D040525,
+ 0x1B80, 0x4D040527,
+ 0x1B80, 0x20800535,
+ 0x1B80, 0x20800537,
+ 0x1B80, 0x00000545,
+ 0x1B80, 0x00000547,
+ 0x1B80, 0x4D000555,
+ 0x1B80, 0x4D000557,
+ 0x1B80, 0x55070565,
+ 0x1B80, 0x55070567,
+ 0x1B80, 0xE15D0575,
+ 0x1B80, 0xE15D0577,
+ 0x1B80, 0xE15D0585,
+ 0x1B80, 0xE15D0587,
+ 0x1B80, 0x4D040595,
+ 0x1B80, 0x4D040597,
+ 0x1B80, 0x208805A5,
+ 0x1B80, 0x208805A7,
+ 0x1B80, 0x020005B5,
+ 0x1B80, 0x020005B7,
+ 0x1B80, 0x4D0005C5,
+ 0x1B80, 0x4D0005C7,
+ 0x1B80, 0x550F05D5,
+ 0x1B80, 0x550F05D7,
+ 0x1B80, 0xE15D05E5,
+ 0x1B80, 0xE15D05E7,
+ 0x1B80, 0x4F0205F5,
+ 0x1B80, 0x4F0205F7,
+ 0x1B80, 0x4E000605,
+ 0x1B80, 0x4E000607,
+ 0x1B80, 0x53020615,
+ 0x1B80, 0x53020617,
+ 0x1B80, 0x52010625,
+ 0x1B80, 0x52010627,
+ 0x1B80, 0xE1610635,
+ 0x1B80, 0xE1610637,
+ 0x1B80, 0x4D080645,
+ 0x1B80, 0x4D080647,
+ 0x1B80, 0x57100655,
+ 0x1B80, 0x57100657,
+ 0x1B80, 0x57000665,
+ 0x1B80, 0x57000667,
+ 0x1B80, 0x4D000675,
+ 0x1B80, 0x4D000677,
+ 0x1B80, 0x00010685,
+ 0x1B80, 0x00010687,
+ 0x1B80, 0xE1650695,
+ 0x1B80, 0xE1650697,
+ 0x1B80, 0x000106A5,
+ 0x1B80, 0x000106A7,
+ 0x1B80, 0x308B06B5,
+ 0x1B80, 0x308B06B7,
+ 0x1B80, 0x002306C5,
+ 0x1B80, 0x002306C7,
+ 0x1B80, 0xE1CB06D5,
+ 0x1B80, 0xE1CB06D7,
+ 0x1B80, 0x000206E5,
+ 0x1B80, 0x000206E7,
+ 0x1B80, 0x54E906F5,
+ 0x1B80, 0x54E906F7,
+ 0x1B80, 0x0BA60705,
+ 0x1B80, 0x0BA60707,
+ 0x1B80, 0x00230715,
+ 0x1B80, 0x00230717,
+ 0x1B80, 0xE1CB0725,
+ 0x1B80, 0xE1CB0727,
+ 0x1B80, 0x00020735,
+ 0x1B80, 0x00020737,
+ 0x1B80, 0x4D300745,
+ 0x1B80, 0x4D300747,
+ 0x1B80, 0x30A40755,
+ 0x1B80, 0x30A40757,
+ 0x1B80, 0x30870765,
+ 0x1B80, 0x30870767,
+ 0x1B80, 0x00220775,
+ 0x1B80, 0x00220777,
+ 0x1B80, 0xE1CB0785,
+ 0x1B80, 0xE1CB0787,
+ 0x1B80, 0x00020795,
+ 0x1B80, 0x00020797,
+ 0x1B80, 0x54E807A5,
+ 0x1B80, 0x54E807A7,
+ 0x1B80, 0x0BA607B5,
+ 0x1B80, 0x0BA607B7,
+ 0x1B80, 0x002207C5,
+ 0x1B80, 0x002207C7,
+ 0x1B80, 0xE1CB07D5,
+ 0x1B80, 0xE1CB07D7,
+ 0x1B80, 0x000207E5,
+ 0x1B80, 0x000207E7,
+ 0x1B80, 0x4D3007F5,
+ 0x1B80, 0x4D3007F7,
+ 0x1B80, 0x30A40805,
+ 0x1B80, 0x30A40807,
+ 0x1B80, 0x63F10815,
+ 0x1B80, 0x63F10817,
+ 0x1B80, 0xE1650825,
+ 0x1B80, 0xE1650827,
+ 0x1B80, 0xE1CB0835,
+ 0x1B80, 0xE1CB0837,
+ 0x1B80, 0x63F40845,
+ 0x1B80, 0x63F40847,
+ 0x1B80, 0xE1650855,
+ 0x1B80, 0xE1650857,
+ 0x1B80, 0xE1CB0865,
+ 0x1B80, 0xE1CB0867,
+ 0x1B80, 0x0BA80875,
+ 0x1B80, 0x0BA80877,
+ 0x1B80, 0x63F80885,
+ 0x1B80, 0x63F80887,
+ 0x1B80, 0xE1650895,
+ 0x1B80, 0xE1650897,
+ 0x1B80, 0xE1CB08A5,
+ 0x1B80, 0xE1CB08A7,
+ 0x1B80, 0x0BA908B5,
+ 0x1B80, 0x0BA908B7,
+ 0x1B80, 0x63FC08C5,
+ 0x1B80, 0x63FC08C7,
+ 0x1B80, 0xE16508D5,
+ 0x1B80, 0xE16508D7,
+ 0x1B80, 0xE1CB08E5,
+ 0x1B80, 0xE1CB08E7,
+ 0x1B80, 0x63FF08F5,
+ 0x1B80, 0x63FF08F7,
+ 0x1B80, 0xE1650905,
+ 0x1B80, 0xE1650907,
+ 0x1B80, 0xE1CB0915,
+ 0x1B80, 0xE1CB0917,
+ 0x1B80, 0x63000925,
+ 0x1B80, 0x63000927,
+ 0x1B80, 0xE1650935,
+ 0x1B80, 0xE1650937,
+ 0x1B80, 0xE1CB0945,
+ 0x1B80, 0xE1CB0947,
+ 0x1B80, 0x63030955,
+ 0x1B80, 0x63030957,
+ 0x1B80, 0xE1650965,
+ 0x1B80, 0xE1650967,
+ 0x1B80, 0xE1CB0975,
+ 0x1B80, 0xE1CB0977,
+ 0x1B80, 0xF4D40985,
+ 0x1B80, 0xF4D40987,
+ 0x1B80, 0x63070995,
+ 0x1B80, 0x63070997,
+ 0x1B80, 0xE16509A5,
+ 0x1B80, 0xE16509A7,
+ 0x1B80, 0xE1CB09B5,
+ 0x1B80, 0xE1CB09B7,
+ 0x1B80, 0xF5DB09C5,
+ 0x1B80, 0xF5DB09C7,
+ 0x1B80, 0x630B09D5,
+ 0x1B80, 0x630B09D7,
+ 0x1B80, 0xE16509E5,
+ 0x1B80, 0xE16509E7,
+ 0x1B80, 0xE1CB09F5,
+ 0x1B80, 0xE1CB09F7,
+ 0x1B80, 0x630E0A05,
+ 0x1B80, 0x630E0A07,
+ 0x1B80, 0xE1650A15,
+ 0x1B80, 0xE1650A17,
+ 0x1B80, 0xE1CB0A25,
+ 0x1B80, 0xE1CB0A27,
+ 0x1B80, 0x4D300A35,
+ 0x1B80, 0x4D300A37,
+ 0x1B80, 0x55010A45,
+ 0x1B80, 0x55010A47,
+ 0x1B80, 0x57040A55,
+ 0x1B80, 0x57040A57,
+ 0x1B80, 0x57000A65,
+ 0x1B80, 0x57000A67,
+ 0x1B80, 0x96000A75,
+ 0x1B80, 0x96000A77,
+ 0x1B80, 0x57080A85,
+ 0x1B80, 0x57080A87,
+ 0x1B80, 0x57000A95,
+ 0x1B80, 0x57000A97,
+ 0x1B80, 0x95000AA5,
+ 0x1B80, 0x95000AA7,
+ 0x1B80, 0x4D000AB5,
+ 0x1B80, 0x4D000AB7,
+ 0x1B80, 0x6C070AC5,
+ 0x1B80, 0x6C070AC7,
+ 0x1B80, 0x7B200AD5,
+ 0x1B80, 0x7B200AD7,
+ 0x1B80, 0x7A000AE5,
+ 0x1B80, 0x7A000AE7,
+ 0x1B80, 0x79000AF5,
+ 0x1B80, 0x79000AF7,
+ 0x1B80, 0x7F200B05,
+ 0x1B80, 0x7F200B07,
+ 0x1B80, 0x7E000B15,
+ 0x1B80, 0x7E000B17,
+ 0x1B80, 0x7D000B25,
+ 0x1B80, 0x7D000B27,
+ 0x1B80, 0x00010B35,
+ 0x1B80, 0x00010B37,
+ 0x1B80, 0x62850B45,
+ 0x1B80, 0x62850B47,
+ 0x1B80, 0xE1650B55,
+ 0x1B80, 0xE1650B57,
+ 0x1B80, 0x00010B65,
+ 0x1B80, 0x00010B67,
+ 0x1B80, 0x5C320B75,
+ 0x1B80, 0x5C320B77,
+ 0x1B80, 0xE1C70B85,
+ 0x1B80, 0xE1C70B87,
+ 0x1B80, 0xE1930B95,
+ 0x1B80, 0xE1930B97,
+ 0x1B80, 0x00010BA5,
+ 0x1B80, 0x00010BA7,
+ 0x1B80, 0x5C320BB5,
+ 0x1B80, 0x5C320BB7,
+ 0x1B80, 0x63F40BC5,
+ 0x1B80, 0x63F40BC7,
+ 0x1B80, 0x62850BD5,
+ 0x1B80, 0x62850BD7,
+ 0x1B80, 0x0BB00BE5,
+ 0x1B80, 0x0BB00BE7,
+ 0x1B80, 0xE1650BF5,
+ 0x1B80, 0xE1650BF7,
+ 0x1B80, 0xE1CB0C05,
+ 0x1B80, 0xE1CB0C07,
+ 0x1B80, 0x5C320C15,
+ 0x1B80, 0x5C320C17,
+ 0x1B80, 0x63FC0C25,
+ 0x1B80, 0x63FC0C27,
+ 0x1B80, 0x62850C35,
+ 0x1B80, 0x62850C37,
+ 0x1B80, 0x0BB10C45,
+ 0x1B80, 0x0BB10C47,
+ 0x1B80, 0xE1650C55,
+ 0x1B80, 0xE1650C57,
+ 0x1B80, 0xE1CB0C65,
+ 0x1B80, 0xE1CB0C67,
+ 0x1B80, 0x63030C75,
+ 0x1B80, 0x63030C77,
+ 0x1B80, 0xE1650C85,
+ 0x1B80, 0xE1650C87,
+ 0x1B80, 0xE1CB0C95,
+ 0x1B80, 0xE1CB0C97,
+ 0x1B80, 0xF7040CA5,
+ 0x1B80, 0xF7040CA7,
+ 0x1B80, 0x630B0CB5,
+ 0x1B80, 0x630B0CB7,
+ 0x1B80, 0xE1650CC5,
+ 0x1B80, 0xE1650CC7,
+ 0x1B80, 0xE1CB0CD5,
+ 0x1B80, 0xE1CB0CD7,
+ 0x1B80, 0x00010CE5,
+ 0x1B80, 0x00010CE7,
+ 0x1B80, 0x30F30CF5,
+ 0x1B80, 0x30F30CF7,
+ 0x1B80, 0x00230D05,
+ 0x1B80, 0x00230D07,
+ 0x1B80, 0xE1D00D15,
+ 0x1B80, 0xE1D00D17,
+ 0x1B80, 0x00020D25,
+ 0x1B80, 0x00020D27,
+ 0x1B80, 0x54E90D35,
+ 0x1B80, 0x54E90D37,
+ 0x1B80, 0x0BA60D45,
+ 0x1B80, 0x0BA60D47,
+ 0x1B80, 0x00230D55,
+ 0x1B80, 0x00230D57,
+ 0x1B80, 0xE1D00D65,
+ 0x1B80, 0xE1D00D67,
+ 0x1B80, 0x00020D75,
+ 0x1B80, 0x00020D77,
+ 0x1B80, 0x4D100D85,
+ 0x1B80, 0x4D100D87,
+ 0x1B80, 0x30A40D95,
+ 0x1B80, 0x30A40D97,
+ 0x1B80, 0x30ED0DA5,
+ 0x1B80, 0x30ED0DA7,
+ 0x1B80, 0x00220DB5,
+ 0x1B80, 0x00220DB7,
+ 0x1B80, 0xE1D00DC5,
+ 0x1B80, 0xE1D00DC7,
+ 0x1B80, 0x00020DD5,
+ 0x1B80, 0x00020DD7,
+ 0x1B80, 0x54E80DE5,
+ 0x1B80, 0x54E80DE7,
+ 0x1B80, 0x0BA60DF5,
+ 0x1B80, 0x0BA60DF7,
+ 0x1B80, 0x00220E05,
+ 0x1B80, 0x00220E07,
+ 0x1B80, 0xE1D00E15,
+ 0x1B80, 0xE1D00E17,
+ 0x1B80, 0x00020E25,
+ 0x1B80, 0x00020E27,
+ 0x1B80, 0x4D100E35,
+ 0x1B80, 0x4D100E37,
+ 0x1B80, 0x30A40E45,
+ 0x1B80, 0x30A40E47,
+ 0x1B80, 0x5C320E55,
+ 0x1B80, 0x5C320E57,
+ 0x1B80, 0x54F00E65,
+ 0x1B80, 0x54F00E67,
+ 0x1B80, 0x67F10E75,
+ 0x1B80, 0x67F10E77,
+ 0x1B80, 0xE1930E85,
+ 0x1B80, 0xE1930E87,
+ 0x1B80, 0xE1D00E95,
+ 0x1B80, 0xE1D00E97,
+ 0x1B80, 0x67F40EA5,
+ 0x1B80, 0x67F40EA7,
+ 0x1B80, 0xE1930EB5,
+ 0x1B80, 0xE1930EB7,
+ 0x1B80, 0xE1D00EC5,
+ 0x1B80, 0xE1D00EC7,
+ 0x1B80, 0x5C320ED5,
+ 0x1B80, 0x5C320ED7,
+ 0x1B80, 0x54F10EE5,
+ 0x1B80, 0x54F10EE7,
+ 0x1B80, 0x0BA80EF5,
+ 0x1B80, 0x0BA80EF7,
+ 0x1B80, 0x67F80F05,
+ 0x1B80, 0x67F80F07,
+ 0x1B80, 0xE1930F15,
+ 0x1B80, 0xE1930F17,
+ 0x1B80, 0xE1D00F25,
+ 0x1B80, 0xE1D00F27,
+ 0x1B80, 0x5C320F35,
+ 0x1B80, 0x5C320F37,
+ 0x1B80, 0x54F10F45,
+ 0x1B80, 0x54F10F47,
+ 0x1B80, 0x0BA90F55,
+ 0x1B80, 0x0BA90F57,
+ 0x1B80, 0x67FC0F65,
+ 0x1B80, 0x67FC0F67,
+ 0x1B80, 0xE1930F75,
+ 0x1B80, 0xE1930F77,
+ 0x1B80, 0xE1D00F85,
+ 0x1B80, 0xE1D00F87,
+ 0x1B80, 0x67FF0F95,
+ 0x1B80, 0x67FF0F97,
+ 0x1B80, 0xE1930FA5,
+ 0x1B80, 0xE1930FA7,
+ 0x1B80, 0xE1D00FB5,
+ 0x1B80, 0xE1D00FB7,
+ 0x1B80, 0x5C320FC5,
+ 0x1B80, 0x5C320FC7,
+ 0x1B80, 0x54F20FD5,
+ 0x1B80, 0x54F20FD7,
+ 0x1B80, 0x67000FE5,
+ 0x1B80, 0x67000FE7,
+ 0x1B80, 0xE1930FF5,
+ 0x1B80, 0xE1930FF7,
+ 0x1B80, 0xE1D01005,
+ 0x1B80, 0xE1D01007,
+ 0x1B80, 0x67031015,
+ 0x1B80, 0x67031017,
+ 0x1B80, 0xE1931025,
+ 0x1B80, 0xE1931027,
+ 0x1B80, 0xE1D01035,
+ 0x1B80, 0xE1D01037,
+ 0x1B80, 0xF9CC1045,
+ 0x1B80, 0xF9CC1047,
+ 0x1B80, 0x67071055,
+ 0x1B80, 0x67071057,
+ 0x1B80, 0xE1931065,
+ 0x1B80, 0xE1931067,
+ 0x1B80, 0xE1D01075,
+ 0x1B80, 0xE1D01077,
+ 0x1B80, 0xFAD31085,
+ 0x1B80, 0xFAD31087,
+ 0x1B80, 0x5C321095,
+ 0x1B80, 0x5C321097,
+ 0x1B80, 0x54F310A5,
+ 0x1B80, 0x54F310A7,
+ 0x1B80, 0x670B10B5,
+ 0x1B80, 0x670B10B7,
+ 0x1B80, 0xE19310C5,
+ 0x1B80, 0xE19310C7,
+ 0x1B80, 0xE1D010D5,
+ 0x1B80, 0xE1D010D7,
+ 0x1B80, 0x670E10E5,
+ 0x1B80, 0x670E10E7,
+ 0x1B80, 0xE19310F5,
+ 0x1B80, 0xE19310F7,
+ 0x1B80, 0xE1D01105,
+ 0x1B80, 0xE1D01107,
+ 0x1B80, 0x4D101115,
+ 0x1B80, 0x4D101117,
+ 0x1B80, 0x30A41125,
+ 0x1B80, 0x30A41127,
+ 0x1B80, 0x00011135,
+ 0x1B80, 0x00011137,
+ 0x1B80, 0x6C001145,
+ 0x1B80, 0x6C001147,
+ 0x1B80, 0x00061155,
+ 0x1B80, 0x00061157,
+ 0x1B80, 0x53001165,
+ 0x1B80, 0x53001167,
+ 0x1B80, 0x57F71175,
+ 0x1B80, 0x57F71177,
+ 0x1B80, 0x58211185,
+ 0x1B80, 0x58211187,
+ 0x1B80, 0x592E1195,
+ 0x1B80, 0x592E1197,
+ 0x1B80, 0x5A3811A5,
+ 0x1B80, 0x5A3811A7,
+ 0x1B80, 0x5B4111B5,
+ 0x1B80, 0x5B4111B7,
+ 0x1B80, 0x000711C5,
+ 0x1B80, 0x000711C7,
+ 0x1B80, 0x5C0011D5,
+ 0x1B80, 0x5C0011D7,
+ 0x1B80, 0x4B0011E5,
+ 0x1B80, 0x4B0011E7,
+ 0x1B80, 0x4E8F11F5,
+ 0x1B80, 0x4E8F11F7,
+ 0x1B80, 0x4F151205,
+ 0x1B80, 0x4F151207,
+ 0x1B80, 0x00041215,
+ 0x1B80, 0x00041217,
+ 0x1B80, 0xE1B51225,
+ 0x1B80, 0xE1B51227,
+ 0x1B80, 0xAB001235,
+ 0x1B80, 0xAB001237,
+ 0x1B80, 0x00011245,
+ 0x1B80, 0x00011247,
+ 0x1B80, 0x6C001255,
+ 0x1B80, 0x6C001257,
+ 0x1B80, 0x00061265,
+ 0x1B80, 0x00061267,
+ 0x1B80, 0x53001275,
+ 0x1B80, 0x53001277,
+ 0x1B80, 0x57F71285,
+ 0x1B80, 0x57F71287,
+ 0x1B80, 0x58211295,
+ 0x1B80, 0x58211297,
+ 0x1B80, 0x592E12A5,
+ 0x1B80, 0x592E12A7,
+ 0x1B80, 0x5A3812B5,
+ 0x1B80, 0x5A3812B7,
+ 0x1B80, 0x5B4112C5,
+ 0x1B80, 0x5B4112C7,
+ 0x1B80, 0x000712D5,
+ 0x1B80, 0x000712D7,
+ 0x1B80, 0x5C0012E5,
+ 0x1B80, 0x5C0012E7,
+ 0x1B80, 0x4B4012F5,
+ 0x1B80, 0x4B4012F7,
+ 0x1B80, 0x4E971305,
+ 0x1B80, 0x4E971307,
+ 0x1B80, 0x4F111315,
+ 0x1B80, 0x4F111317,
+ 0x1B80, 0x00041325,
+ 0x1B80, 0x00041327,
+ 0x1B80, 0xE1B51335,
+ 0x1B80, 0xE1B51337,
+ 0x1B80, 0xAB001345,
+ 0x1B80, 0xAB001347,
+ 0x1B80, 0x8B001355,
+ 0x1B80, 0x8B001357,
+ 0x1B80, 0xAB001365,
+ 0x1B80, 0xAB001367,
+ 0x1B80, 0x8A191375,
+ 0x1B80, 0x8A191377,
+ 0x1B80, 0x301D1385,
+ 0x1B80, 0x301D1387,
+ 0x1B80, 0x00011395,
+ 0x1B80, 0x00011397,
+ 0x1B80, 0x6C0113A5,
+ 0x1B80, 0x6C0113A7,
+ 0x1B80, 0x000613B5,
+ 0x1B80, 0x000613B7,
+ 0x1B80, 0x530113C5,
+ 0x1B80, 0x530113C7,
+ 0x1B80, 0x57F713D5,
+ 0x1B80, 0x57F713D7,
+ 0x1B80, 0x582113E5,
+ 0x1B80, 0x582113E7,
+ 0x1B80, 0x592E13F5,
+ 0x1B80, 0x592E13F7,
+ 0x1B80, 0x5A381405,
+ 0x1B80, 0x5A381407,
+ 0x1B80, 0x5B411415,
+ 0x1B80, 0x5B411417,
+ 0x1B80, 0x00071425,
+ 0x1B80, 0x00071427,
+ 0x1B80, 0x5C001435,
+ 0x1B80, 0x5C001437,
+ 0x1B80, 0x4B001445,
+ 0x1B80, 0x4B001447,
+ 0x1B80, 0x4E871455,
+ 0x1B80, 0x4E871457,
+ 0x1B80, 0x4F111465,
+ 0x1B80, 0x4F111467,
+ 0x1B80, 0x00041475,
+ 0x1B80, 0x00041477,
+ 0x1B80, 0xE1B51485,
+ 0x1B80, 0xE1B51487,
+ 0x1B80, 0xAB001495,
+ 0x1B80, 0xAB001497,
+ 0x1B80, 0x000614A5,
+ 0x1B80, 0x000614A7,
+ 0x1B80, 0x577714B5,
+ 0x1B80, 0x577714B7,
+ 0x1B80, 0x000714C5,
+ 0x1B80, 0x000714C7,
+ 0x1B80, 0x4E8614D5,
+ 0x1B80, 0x4E8614D7,
+ 0x1B80, 0x000414E5,
+ 0x1B80, 0x000414E7,
+ 0x1B80, 0x000114F5,
+ 0x1B80, 0x000114F7,
+ 0x1B80, 0x00011505,
+ 0x1B80, 0x00011507,
+ 0x1B80, 0x7B241515,
+ 0x1B80, 0x7B241517,
+ 0x1B80, 0x7A401525,
+ 0x1B80, 0x7A401527,
+ 0x1B80, 0x79001535,
+ 0x1B80, 0x79001537,
+ 0x1B80, 0x55031545,
+ 0x1B80, 0x55031547,
+ 0x1B80, 0x315D1555,
+ 0x1B80, 0x315D1557,
+ 0x1B80, 0x7B1C1565,
+ 0x1B80, 0x7B1C1567,
+ 0x1B80, 0x7A401575,
+ 0x1B80, 0x7A401577,
+ 0x1B80, 0x550B1585,
+ 0x1B80, 0x550B1587,
+ 0x1B80, 0x315D1595,
+ 0x1B80, 0x315D1597,
+ 0x1B80, 0x7B2015A5,
+ 0x1B80, 0x7B2015A7,
+ 0x1B80, 0x7A0015B5,
+ 0x1B80, 0x7A0015B7,
+ 0x1B80, 0x551315C5,
+ 0x1B80, 0x551315C7,
+ 0x1B80, 0x740115D5,
+ 0x1B80, 0x740115D7,
+ 0x1B80, 0x740015E5,
+ 0x1B80, 0x740015E7,
+ 0x1B80, 0x8E0015F5,
+ 0x1B80, 0x8E0015F7,
+ 0x1B80, 0x00011605,
+ 0x1B80, 0x00011607,
+ 0x1B80, 0x57021615,
+ 0x1B80, 0x57021617,
+ 0x1B80, 0x57001625,
+ 0x1B80, 0x57001627,
+ 0x1B80, 0x97001635,
+ 0x1B80, 0x97001637,
+ 0x1B80, 0x00011645,
+ 0x1B80, 0x00011647,
+ 0x1B80, 0x4F781655,
+ 0x1B80, 0x4F781657,
+ 0x1B80, 0x53881665,
+ 0x1B80, 0x53881667,
+ 0x1B80, 0xE1731675,
+ 0x1B80, 0xE1731677,
+ 0x1B80, 0x54801685,
+ 0x1B80, 0x54801687,
+ 0x1B80, 0x54001695,
+ 0x1B80, 0x54001697,
+ 0x1B80, 0xE17316A5,
+ 0x1B80, 0xE17316A7,
+ 0x1B80, 0x548116B5,
+ 0x1B80, 0x548116B7,
+ 0x1B80, 0x540016C5,
+ 0x1B80, 0x540016C7,
+ 0x1B80, 0xE17316D5,
+ 0x1B80, 0xE17316D7,
+ 0x1B80, 0x548216E5,
+ 0x1B80, 0x548216E7,
+ 0x1B80, 0x540016F5,
+ 0x1B80, 0x540016F7,
+ 0x1B80, 0xE17E1705,
+ 0x1B80, 0xE17E1707,
+ 0x1B80, 0xBF1D1715,
+ 0x1B80, 0xBF1D1717,
+ 0x1B80, 0x301D1725,
+ 0x1B80, 0x301D1727,
+ 0x1B80, 0xE1511735,
+ 0x1B80, 0xE1511737,
+ 0x1B80, 0xE1561745,
+ 0x1B80, 0xE1561747,
+ 0x1B80, 0xE15A1755,
+ 0x1B80, 0xE15A1757,
+ 0x1B80, 0xE1611765,
+ 0x1B80, 0xE1611767,
+ 0x1B80, 0xE1C71775,
+ 0x1B80, 0xE1C71777,
+ 0x1B80, 0x55131785,
+ 0x1B80, 0x55131787,
+ 0x1B80, 0xE15D1795,
+ 0x1B80, 0xE15D1797,
+ 0x1B80, 0x551517A5,
+ 0x1B80, 0x551517A7,
+ 0x1B80, 0xE16117B5,
+ 0x1B80, 0xE16117B7,
+ 0x1B80, 0xE1C717C5,
+ 0x1B80, 0xE1C717C7,
+ 0x1B80, 0x000117D5,
+ 0x1B80, 0x000117D7,
+ 0x1B80, 0x54BF17E5,
+ 0x1B80, 0x54BF17E7,
+ 0x1B80, 0x54C017F5,
+ 0x1B80, 0x54C017F7,
+ 0x1B80, 0x54A31805,
+ 0x1B80, 0x54A31807,
+ 0x1B80, 0x54C11815,
+ 0x1B80, 0x54C11817,
+ 0x1B80, 0x54A41825,
+ 0x1B80, 0x54A41827,
+ 0x1B80, 0x4C181835,
+ 0x1B80, 0x4C181837,
+ 0x1B80, 0xBF071845,
+ 0x1B80, 0xBF071847,
+ 0x1B80, 0x54C21855,
+ 0x1B80, 0x54C21857,
+ 0x1B80, 0x54A41865,
+ 0x1B80, 0x54A41867,
+ 0x1B80, 0xBF041875,
+ 0x1B80, 0xBF041877,
+ 0x1B80, 0x54C11885,
+ 0x1B80, 0x54C11887,
+ 0x1B80, 0x54A31895,
+ 0x1B80, 0x54A31897,
+ 0x1B80, 0xBF0118A5,
+ 0x1B80, 0xBF0118A7,
+ 0x1B80, 0xE1D518B5,
+ 0x1B80, 0xE1D518B7,
+ 0x1B80, 0x54DF18C5,
+ 0x1B80, 0x54DF18C7,
+ 0x1B80, 0x000118D5,
+ 0x1B80, 0x000118D7,
+ 0x1B80, 0x54BF18E5,
+ 0x1B80, 0x54BF18E7,
+ 0x1B80, 0x54E518F5,
+ 0x1B80, 0x54E518F7,
+ 0x1B80, 0x050A1905,
+ 0x1B80, 0x050A1907,
+ 0x1B80, 0x54DF1915,
+ 0x1B80, 0x54DF1917,
+ 0x1B80, 0x00011925,
+ 0x1B80, 0x00011927,
+ 0x1B80, 0x7F201935,
+ 0x1B80, 0x7F201937,
+ 0x1B80, 0x7E001945,
+ 0x1B80, 0x7E001947,
+ 0x1B80, 0x7D001955,
+ 0x1B80, 0x7D001957,
+ 0x1B80, 0x55011965,
+ 0x1B80, 0x55011967,
+ 0x1B80, 0x5C311975,
+ 0x1B80, 0x5C311977,
+ 0x1B80, 0xE15D1985,
+ 0x1B80, 0xE15D1987,
+ 0x1B80, 0xE1611995,
+ 0x1B80, 0xE1611997,
+ 0x1B80, 0x548019A5,
+ 0x1B80, 0x548019A7,
+ 0x1B80, 0x540019B5,
+ 0x1B80, 0x540019B7,
+ 0x1B80, 0xE15D19C5,
+ 0x1B80, 0xE15D19C7,
+ 0x1B80, 0xE16119D5,
+ 0x1B80, 0xE16119D7,
+ 0x1B80, 0x548119E5,
+ 0x1B80, 0x548119E7,
+ 0x1B80, 0x540019F5,
+ 0x1B80, 0x540019F7,
+ 0x1B80, 0xE15D1A05,
+ 0x1B80, 0xE15D1A07,
+ 0x1B80, 0xE1611A15,
+ 0x1B80, 0xE1611A17,
+ 0x1B80, 0x54821A25,
+ 0x1B80, 0x54821A27,
+ 0x1B80, 0x54001A35,
+ 0x1B80, 0x54001A37,
+ 0x1B80, 0xE17E1A45,
+ 0x1B80, 0xE17E1A47,
+ 0x1B80, 0xBFE91A55,
+ 0x1B80, 0xBFE91A57,
+ 0x1B80, 0x301D1A65,
+ 0x1B80, 0x301D1A67,
+ 0x1B80, 0x00231A75,
+ 0x1B80, 0x00231A77,
+ 0x1B80, 0x7B201A85,
+ 0x1B80, 0x7B201A87,
+ 0x1B80, 0x7A001A95,
+ 0x1B80, 0x7A001A97,
+ 0x1B80, 0x79001AA5,
+ 0x1B80, 0x79001AA7,
+ 0x1B80, 0xE1CB1AB5,
+ 0x1B80, 0xE1CB1AB7,
+ 0x1B80, 0x00021AC5,
+ 0x1B80, 0x00021AC7,
+ 0x1B80, 0x00011AD5,
+ 0x1B80, 0x00011AD7,
+ 0x1B80, 0x00221AE5,
+ 0x1B80, 0x00221AE7,
+ 0x1B80, 0x7B201AF5,
+ 0x1B80, 0x7B201AF7,
+ 0x1B80, 0x7A001B05,
+ 0x1B80, 0x7A001B07,
+ 0x1B80, 0x79001B15,
+ 0x1B80, 0x79001B17,
+ 0x1B80, 0xE1CB1B25,
+ 0x1B80, 0xE1CB1B27,
+ 0x1B80, 0x00021B35,
+ 0x1B80, 0x00021B37,
+ 0x1B80, 0x00011B45,
+ 0x1B80, 0x00011B47,
+ 0x1B80, 0x74021B55,
+ 0x1B80, 0x74021B57,
+ 0x1B80, 0x003F1B65,
+ 0x1B80, 0x003F1B67,
+ 0x1B80, 0x74001B75,
+ 0x1B80, 0x74001B77,
+ 0x1B80, 0x00021B85,
+ 0x1B80, 0x00021B87,
+ 0x1B80, 0x00011B95,
+ 0x1B80, 0x00011B97,
+ 0x1B80, 0x4D041BA5,
+ 0x1B80, 0x4D041BA7,
+ 0x1B80, 0x2EF81BB5,
+ 0x1B80, 0x2EF81BB7,
+ 0x1B80, 0x00001BC5,
+ 0x1B80, 0x00001BC7,
+ 0x1B80, 0x23301BD5,
+ 0x1B80, 0x23301BD7,
+ 0x1B80, 0x00241BE5,
+ 0x1B80, 0x00241BE7,
+ 0x1B80, 0x23E01BF5,
+ 0x1B80, 0x23E01BF7,
+ 0x1B80, 0x003F1C05,
+ 0x1B80, 0x003F1C07,
+ 0x1B80, 0x23FC1C15,
+ 0x1B80, 0x23FC1C17,
+ 0x1B80, 0xBFCE1C25,
+ 0x1B80, 0xBFCE1C27,
+ 0x1B80, 0x2EF01C35,
+ 0x1B80, 0x2EF01C37,
+ 0x1B80, 0x00001C45,
+ 0x1B80, 0x00001C47,
+ 0x1B80, 0x4D001C55,
+ 0x1B80, 0x4D001C57,
+ 0x1B80, 0x00011C65,
+ 0x1B80, 0x00011C67,
+ 0x1B80, 0x549F1C75,
+ 0x1B80, 0x549F1C77,
+ 0x1B80, 0x54FF1C85,
+ 0x1B80, 0x54FF1C87,
+ 0x1B80, 0x54001C95,
+ 0x1B80, 0x54001C97,
+ 0x1B80, 0x00011CA5,
+ 0x1B80, 0x00011CA7,
+ 0x1B80, 0x5C311CB5,
+ 0x1B80, 0x5C311CB7,
+ 0x1B80, 0x07141CC5,
+ 0x1B80, 0x07141CC7,
+ 0x1B80, 0x54001CD5,
+ 0x1B80, 0x54001CD7,
+ 0x1B80, 0x5C321CE5,
+ 0x1B80, 0x5C321CE7,
+ 0x1B80, 0x00011CF5,
+ 0x1B80, 0x00011CF7,
+ 0x1B80, 0x5C321D05,
+ 0x1B80, 0x5C321D07,
+ 0x1B80, 0x07141D15,
+ 0x1B80, 0x07141D17,
+ 0x1B80, 0x54001D25,
+ 0x1B80, 0x54001D27,
+ 0x1B80, 0x5C311D35,
+ 0x1B80, 0x5C311D37,
+ 0x1B80, 0x00011D45,
+ 0x1B80, 0x00011D47,
+ 0x1B80, 0x4C981D55,
+ 0x1B80, 0x4C981D57,
+ 0x1B80, 0x4C181D65,
+ 0x1B80, 0x4C181D67,
+ 0x1B80, 0x00011D75,
+ 0x1B80, 0x00011D77,
+ 0x1B80, 0x5C321D85,
+ 0x1B80, 0x5C321D87,
+ 0x1B80, 0x62841D95,
+ 0x1B80, 0x62841D97,
+ 0x1B80, 0x66861DA5,
+ 0x1B80, 0x66861DA7,
+ 0x1B80, 0x6C031DB5,
+ 0x1B80, 0x6C031DB7,
+ 0x1B80, 0x7B201DC5,
+ 0x1B80, 0x7B201DC7,
+ 0x1B80, 0x7A001DD5,
+ 0x1B80, 0x7A001DD7,
+ 0x1B80, 0x79001DE5,
+ 0x1B80, 0x79001DE7,
+ 0x1B80, 0x7F201DF5,
+ 0x1B80, 0x7F201DF7,
+ 0x1B80, 0x7E001E05,
+ 0x1B80, 0x7E001E07,
+ 0x1B80, 0x7D001E15,
+ 0x1B80, 0x7D001E17,
+ 0x1B80, 0x09011E25,
+ 0x1B80, 0x09011E27,
+ 0x1B80, 0x0C011E35,
+ 0x1B80, 0x0C011E37,
+ 0x1B80, 0x0BA61E45,
+ 0x1B80, 0x0BA61E47,
+ 0x1B80, 0x00011E55,
+ 0x1B80, 0x00011E57,
+ 0x1B80, 0x00000006,
+ 0x1B80, 0x00000002,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822b_bb, rtw_phy_cfg_bb);
+
+static const u32 rtw8822b_bb_pg_type2[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x40424446,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x32343638,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x38404244,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x30323436,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x38404244,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x30323436,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x38404244,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x30323436,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x42442628,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x34363840,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x26283032,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x40424446,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x32343638,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x38404244,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x30323436,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x38404244,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x30323436,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x38404244,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x30323436,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x42442628,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x34363840,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x26283032
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type2);
+
+static const u32 rtw8822b_bb_pg_type5[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x32343638,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x36384042,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x28303234,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x26283032,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x34363840,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x34363840,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x26283032,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x38402224,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x30323436,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x22242628,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x32343638,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x36384042,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x28303234,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x26283032,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x34363840,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x34363840,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x26283032,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x38402224,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x30323436,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x22242628,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x34363840,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x26283032,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x32343638,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x24262830,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x32343638,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x32343638,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x24262830,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x36382022,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x28303234,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x20222426,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x34363840,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x26283032,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x32343638,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x24262830,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x32343638,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x24262830,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x32343638,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x24262830,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x36382022,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x28303234,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x20222426
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822b_bb_pg_type5);
+
+static const u32 rtw8822b_rf_a[] = {
+ 0x000, 0x00030000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0xA0000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000002,
+ 0x03E, 0x0000003F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4E,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4E,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000034,
+ 0x03F, 0x0004080E,
+ 0x0EF, 0x00080000,
+ 0x0DF, 0x00002449,
+ 0x033, 0x00000024,
+ 0x03E, 0x0000003F,
+ 0x03F, 0x00060FDE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000037,
+ 0x03F, 0x0007EFCE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000037,
+ 0x03F, 0x000DEFCE,
+ 0x0EF, 0x00000000,
+ 0x07F, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FB0F8,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0xA0000000, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0xB0000000, 0x00000000,
+ 0x0B1, 0x0007DBE4,
+ 0x0B2, 0x000225D1,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C330,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0003C360,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x000FC760,
+ 0xB0000000, 0x00000000,
+ 0x0B4, 0x00099DD0,
+ 0x0B5, 0x000400FC,
+ 0x0B6, 0x000187F0,
+ 0x0B7, 0x00030018,
+ 0x0B8, 0x00080800,
+ 0x0B9, 0x00000000,
+ 0x0BA, 0x00008000,
+ 0x0BB, 0x00000000,
+ 0x0BC, 0x00040030,
+ 0x0BD, 0x00000000,
+ 0x0BE, 0x00000000,
+ 0x0BF, 0x00000000,
+ 0x0C0, 0x00000000,
+ 0x0C1, 0x00000000,
+ 0x0C2, 0x00000000,
+ 0x0C3, 0x00000000,
+ 0x0C4, 0x00002402,
+ 0x0C5, 0x00000009,
+ 0x0C6, 0x00040299,
+ 0x0C7, 0x00055555,
+ 0x0C8, 0x0000C16C,
+ 0x0C9, 0x0001C146,
+ 0x0CA, 0x00000000,
+ 0x0CB, 0x00000000,
+ 0x0CC, 0x00000000,
+ 0x0CD, 0x00000000,
+ 0x0CE, 0x00090C00,
+ 0x0CF, 0x0006D200,
+ 0x0DF, 0x00000009,
+ 0x018, 0x00010524,
+ 0x089, 0x00000207,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xA0000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xB0000000, 0x00000000,
+ 0x08B, 0x00061E3C,
+ 0x08C, 0x000112C7,
+ 0x08D, 0x000F4988,
+ 0x08E, 0x00064D40,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000007,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000005,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004084,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000004,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004108,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004190,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000003,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x0000490C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004998,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000002,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005E00,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00005840,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000001,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005862,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000058C2,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005948,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00005930,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000F,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000D,
+ 0x03E, 0x000040C8,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000009,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000008,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000017,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000DFF86,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000015,
+ 0x03E, 0x000040C8,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000014,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000013,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000012,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000011,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000010,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000001,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000006,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00000401,
+ 0x084, 0x00001209,
+ 0x086, 0x000001A0,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0xA0000000, 0x00000000,
+ 0x087, 0x000E8180,
+ 0xB0000000, 0x00000000,
+ 0x088, 0x00070020,
+ 0x0DE, 0x00000010,
+ 0x0EF, 0x00008000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x0000003C,
+ 0x033, 0x0000000E,
+ 0x03F, 0x00000038,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000030,
+ 0x033, 0x0000000C,
+ 0x03F, 0x00000028,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000020,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000003C,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000038,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000028,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000020,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000008,
+ 0x0EF, 0x00000000,
+ 0x0B8, 0x00080A00,
+ 0x0FE, 0x00000000,
+ 0x0B0, 0x000FF0FA,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0CA, 0x00080000,
+ 0x0FE, 0x00000000,
+ 0x0C9, 0x0001C141,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0B0, 0x000FF0F8,
+ 0x018, 0x00018D24,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x01B, 0x00075A40,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D301,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0xA0000000, 0x00000000,
+ 0x061, 0x0005D3D0,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x0EF, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000303,
+ 0x030, 0x00001303,
+ 0x030, 0x00002303,
+ 0x030, 0x00003303,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x00004355,
+ 0x030, 0x00005355,
+ 0x030, 0x00006355,
+ 0x030, 0x00007355,
+ 0x030, 0x00008315,
+ 0x030, 0x00009315,
+ 0x030, 0x0000A315,
+ 0x030, 0x0000B315,
+ 0x0EF, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000384,
+ 0x030, 0x00001384,
+ 0x030, 0x00002384,
+ 0x030, 0x00003384,
+ 0x030, 0x00004425,
+ 0x030, 0x00005425,
+ 0x030, 0x00006425,
+ 0x030, 0x00007425,
+ 0x030, 0x000084A6,
+ 0x030, 0x000094A6,
+ 0x030, 0x0000A4A6,
+ 0x030, 0x0000B4A6,
+ 0x0EF, 0x00000000,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000463,
+ 0x030, 0x00001463,
+ 0x030, 0x00002463,
+ 0x030, 0x00003463,
+ 0x030, 0x00004545,
+ 0x030, 0x00005545,
+ 0x030, 0x00006545,
+ 0x030, 0x00007545,
+ 0x030, 0x00008565,
+ 0x030, 0x00009565,
+ 0x030, 0x0000A565,
+ 0x030, 0x0000B565,
+ 0x0EF, 0x00000000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x0EF, 0x00000000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x0EF, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000443,
+ 0x030, 0x00001443,
+ 0x030, 0x00002443,
+ 0x030, 0x00003443,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000443,
+ 0x030, 0x00001443,
+ 0x030, 0x00002443,
+ 0x030, 0x00003443,
+ 0x030, 0x00004483,
+ 0x030, 0x00005483,
+ 0x030, 0x00006483,
+ 0x030, 0x00007483,
+ 0x030, 0x000084A4,
+ 0x030, 0x000094A4,
+ 0x030, 0x0000A4A4,
+ 0x030, 0x0000B4A4,
+ 0x0EF, 0x00000000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000361,
+ 0x030, 0x00001361,
+ 0x030, 0x00002361,
+ 0x030, 0x00003361,
+ 0x030, 0x00004443,
+ 0x030, 0x00005443,
+ 0x030, 0x00006443,
+ 0x030, 0x00007443,
+ 0x030, 0x00008424,
+ 0x030, 0x00009424,
+ 0x030, 0x0000A424,
+ 0x030, 0x0000B424,
+ 0x0EF, 0x00000000,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x00000403,
+ 0x030, 0x00001403,
+ 0x030, 0x00002403,
+ 0x030, 0x00003403,
+ 0x030, 0x000044A2,
+ 0x030, 0x000054A2,
+ 0x030, 0x000064A2,
+ 0x030, 0x000074A2,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x0EF, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x0EF, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x0EF, 0x00000000,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x0EF, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x0EF, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A0,
+ 0x030, 0x000013A0,
+ 0x030, 0x000023A0,
+ 0x030, 0x000033A0,
+ 0x030, 0x000043A1,
+ 0x030, 0x000053A1,
+ 0x030, 0x000063A1,
+ 0x030, 0x000073A1,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x0EF, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x0EF, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003A0,
+ 0x030, 0x000013A0,
+ 0x030, 0x000023A0,
+ 0x030, 0x000033A0,
+ 0x030, 0x00004430,
+ 0x030, 0x00005430,
+ 0x030, 0x00006430,
+ 0x030, 0x00007430,
+ 0x030, 0x00008372,
+ 0x030, 0x00009372,
+ 0x030, 0x0000A372,
+ 0x030, 0x0000B372,
+ 0x0EF, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x0EF, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x030, 0x000003D0,
+ 0x030, 0x000013D0,
+ 0x030, 0x000023D0,
+ 0x030, 0x000033D0,
+ 0x030, 0x000043D0,
+ 0x030, 0x000053D0,
+ 0x030, 0x000063D0,
+ 0x030, 0x000073D0,
+ 0x030, 0x000083D0,
+ 0x030, 0x000093D0,
+ 0x030, 0x0000A3D0,
+ 0x030, 0x0000B3D0,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A3,
+ 0x030, 0x000053A3,
+ 0x030, 0x000063A3,
+ 0x030, 0x000073A3,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000776,
+ 0x030, 0x00001455,
+ 0x030, 0x00002335,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000660,
+ 0x030, 0x00001443,
+ 0x030, 0x00002221,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000767,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000765,
+ 0x030, 0x00001632,
+ 0x030, 0x00002451,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001454,
+ 0x030, 0x00002224,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000775,
+ 0x030, 0x00001422,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000775,
+ 0x030, 0x00001343,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C29,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C69,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000CA8,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C0B,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0E,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2E,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000086D,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000870,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000891,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000894,
+ 0x033, 0x00000029,
+ 0x03F, 0x000008B5,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000008F5,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000829,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000848,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084B,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000080B,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000080E,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000848,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000869,
+ 0x033, 0x00000064,
+ 0x03F, 0x000008A9,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C10,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C4A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CC9,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000086D,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000870,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000891,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000894,
+ 0x033, 0x00000069,
+ 0x03F, 0x000008B5,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000008F5,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8E,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8E,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CED,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000824,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000827,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000082A,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000082D,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C68,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6B,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CCA,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CCD,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C08,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0B,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0E,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2E,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C31,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CCA,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CCD,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000086A,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000086D,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000870,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000891,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000894,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000008B5,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000008F5,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000008BB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000008BB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000008BB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000008BB,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000047C,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000047C,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000004BB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000001,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000002,
+ 0x03F, 0x00001726,
+ 0x033, 0x00000003,
+ 0x03F, 0x00001726,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000F34,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0xA0000000, 0x00000000,
+ 0x081, 0x0000F000,
+ 0x087, 0x00016040,
+ 0x051, 0x00000C00,
+ 0x052, 0x0007C241,
+ 0x053, 0x0001C069,
+ 0x054, 0x00078032,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00058750,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005144B,
+ 0x033, 0x00000002,
+ 0x03F, 0x0005144E,
+ 0x033, 0x00000003,
+ 0x03F, 0x00051C69,
+ 0x033, 0x00000004,
+ 0x03F, 0x00051C6C,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051C6F,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051CEB,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051CEE,
+ 0x033, 0x00000008,
+ 0x03F, 0x00051CF1,
+ 0x033, 0x00000009,
+ 0x03F, 0x00051CF4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000010,
+ 0x033, 0x00000000,
+ 0x008, 0x0009C060,
+ 0x033, 0x00000001,
+ 0x008, 0x0009C060,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A2,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x0000593F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4F,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4F,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A3,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x00005934,
+ 0x03F, 0x0005AFCF,
+ 0x0EF, 0x00000000,
+ 0x83000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CE, 0x00094400,
+ 0xA0000000, 0x00000000,
+ 0x0CE, 0x00094C00,
+ 0xB0000000, 0x00000000,
+ 0x83000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00064700,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0CF, 0x00072F00,
+ 0xA0000000, 0x00000000,
+ 0x0CF, 0x00064700,
+ 0xB0000000, 0x00000000,
+ 0x83000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000096,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000056,
+ 0x0EF, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000004,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000056,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000096,
+ 0x033, 0x00000001,
+ 0x03F, 0x000000D6,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0B0, 0x000FF0FC,
+ 0x0C4, 0x00081402,
+ 0x0CC, 0x00082000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_a, A);
+
+static const u32 rtw8822b_rf_b[] = {
+ 0x000, 0x00030000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x001, 0x0004002D,
+ 0xA0000000, 0x00000000,
+ 0x001, 0x00040029,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000002,
+ 0x03E, 0x0000003F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4E,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4E,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000034,
+ 0x03F, 0x0004080E,
+ 0x0EF, 0x00080000,
+ 0x0DF, 0x00002449,
+ 0x033, 0x00000024,
+ 0x03E, 0x0000003F,
+ 0x03F, 0x00060FDE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000037,
+ 0x03F, 0x0007EFCE,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000037,
+ 0x03F, 0x000DEFCE,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000009,
+ 0x018, 0x00010524,
+ 0x089, 0x00000207,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FE186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xA0000000, 0x00000000,
+ 0x08A, 0x000FF186,
+ 0xB0000000, 0x00000000,
+ 0x08B, 0x00061E3C,
+ 0x08C, 0x000112C7,
+ 0x08D, 0x000F4988,
+ 0x08E, 0x00064D40,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000007,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000005,
+ 0x03E, 0x000040C8,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000004,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000003,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000002,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000001,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000000,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000F,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000D,
+ 0x8300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040D0,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000009,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000008,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000017,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000DFF86,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000DFF86,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C0006,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x00004040,
+ 0x03F, 0x000C3186,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x00004000,
+ 0x03F, 0x000C3186,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00004080,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000015,
+ 0x8300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03E, 0x000040D0,
+ 0xA0000000, 0x00000000,
+ 0x03E, 0x000040C8,
+ 0xB0000000, 0x00000000,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000014,
+ 0x03E, 0x00004190,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000013,
+ 0x03E, 0x00004998,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000012,
+ 0x03E, 0x00005840,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000011,
+ 0x03E, 0x000058C2,
+ 0x03F, 0x000C3186,
+ 0x033, 0x00000010,
+ 0x03E, 0x00005930,
+ 0x03F, 0x000C3186,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000001,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00000005,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00000401,
+ 0x084, 0x00001209,
+ 0x086, 0x000001A0,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x087, 0x00068080,
+ 0xA0000000, 0x00000000,
+ 0x087, 0x000E8180,
+ 0xB0000000, 0x00000000,
+ 0x088, 0x00070020,
+ 0x0DE, 0x00000010,
+ 0x0EF, 0x00008000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x0000003C,
+ 0x033, 0x0000000E,
+ 0x03F, 0x00000038,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000030,
+ 0x033, 0x0000000C,
+ 0x03F, 0x00000028,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000020,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000007,
+ 0x03F, 0x0000003C,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000038,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000028,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000020,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000018,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000010,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000008,
+ 0x0EF, 0x00000000,
+ 0x018, 0x00018D24,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x018, 0x00010D24,
+ 0x01B, 0x00075A40,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000006,
+ 0x03F, 0x00000004,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D3D1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000062,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D2A1,
+ 0x062, 0x0000D3A2,
+ 0x063, 0x00000002,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x061, 0x0005D4A0,
+ 0x062, 0x0000D203,
+ 0x063, 0x00000062,
+ 0xA0000000, 0x00000000,
+ 0x061, 0x0005D3D0,
+ 0x062, 0x0000D303,
+ 0x063, 0x00000002,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A3,
+ 0x030, 0x000053A3,
+ 0x030, 0x000063A3,
+ 0x030, 0x000073A3,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000303,
+ 0x030, 0x00001303,
+ 0x030, 0x00002303,
+ 0x030, 0x00003303,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A4,
+ 0x030, 0x000014A4,
+ 0x030, 0x000024A4,
+ 0x030, 0x000034A4,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x000083A5,
+ 0x030, 0x000093A5,
+ 0x030, 0x0000A3A5,
+ 0x030, 0x0000B3A5,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002F4,
+ 0x030, 0x000012F4,
+ 0x030, 0x000022F4,
+ 0x030, 0x000032F4,
+ 0x030, 0x00004365,
+ 0x030, 0x00005365,
+ 0x030, 0x00006365,
+ 0x030, 0x00007365,
+ 0x030, 0x000082A4,
+ 0x030, 0x000092A4,
+ 0x030, 0x0000A2A4,
+ 0x030, 0x0000B2A4,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000382,
+ 0x030, 0x00001382,
+ 0x030, 0x00002382,
+ 0x030, 0x00003382,
+ 0x030, 0x00004445,
+ 0x030, 0x00005445,
+ 0x030, 0x00006445,
+ 0x030, 0x00007445,
+ 0x030, 0x00008425,
+ 0x030, 0x00009425,
+ 0x030, 0x0000A425,
+ 0x030, 0x0000B425,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A3,
+ 0x030, 0x000053A3,
+ 0x030, 0x000063A3,
+ 0x030, 0x000073A3,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A3,
+ 0x030, 0x000014A3,
+ 0x030, 0x000024A3,
+ 0x030, 0x000034A3,
+ 0x030, 0x000044A3,
+ 0x030, 0x000054A3,
+ 0x030, 0x000064A3,
+ 0x030, 0x000074A3,
+ 0x030, 0x000084A3,
+ 0x030, 0x000094A3,
+ 0x030, 0x0000A4A3,
+ 0x030, 0x0000B4A3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000443,
+ 0x030, 0x00001443,
+ 0x030, 0x00002443,
+ 0x030, 0x00003443,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000343,
+ 0x030, 0x00001343,
+ 0x030, 0x00002343,
+ 0x030, 0x00003343,
+ 0x030, 0x00004483,
+ 0x030, 0x00005483,
+ 0x030, 0x00006483,
+ 0x030, 0x00007483,
+ 0x030, 0x000083A4,
+ 0x030, 0x000093A4,
+ 0x030, 0x0000A3A4,
+ 0x030, 0x0000B3A4,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x00004423,
+ 0x030, 0x00005423,
+ 0x030, 0x00006423,
+ 0x030, 0x00007423,
+ 0x030, 0x00008324,
+ 0x030, 0x00009324,
+ 0x030, 0x0000A324,
+ 0x030, 0x0000B324,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000303,
+ 0x030, 0x00001303,
+ 0x030, 0x00002303,
+ 0x030, 0x00003303,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000403,
+ 0x030, 0x00001403,
+ 0x030, 0x00002403,
+ 0x030, 0x00003403,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x00008365,
+ 0x030, 0x00009365,
+ 0x030, 0x0000A365,
+ 0x030, 0x0000B365,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A6,
+ 0x030, 0x000012A6,
+ 0x030, 0x000022A6,
+ 0x030, 0x000032A6,
+ 0x030, 0x000042A6,
+ 0x030, 0x000052A6,
+ 0x030, 0x000062A6,
+ 0x030, 0x000072A6,
+ 0x030, 0x000082A6,
+ 0x030, 0x000092A6,
+ 0x030, 0x0000A2A6,
+ 0x030, 0x0000B2A6,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000043A1,
+ 0x030, 0x000053A1,
+ 0x030, 0x000063A1,
+ 0x030, 0x000073A1,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000002A1,
+ 0x030, 0x000012A1,
+ 0x030, 0x000022A1,
+ 0x030, 0x000032A1,
+ 0x030, 0x000042A1,
+ 0x030, 0x000052A1,
+ 0x030, 0x000062A1,
+ 0x030, 0x000072A1,
+ 0x030, 0x000082A1,
+ 0x030, 0x000092A1,
+ 0x030, 0x0000A2A1,
+ 0x030, 0x0000B2A1,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A1,
+ 0x030, 0x000014A1,
+ 0x030, 0x000024A1,
+ 0x030, 0x000034A1,
+ 0x030, 0x000043A1,
+ 0x030, 0x000053A1,
+ 0x030, 0x000063A1,
+ 0x030, 0x000073A1,
+ 0x030, 0x000083A1,
+ 0x030, 0x000093A1,
+ 0x030, 0x0000A3A1,
+ 0x030, 0x0000B3A1,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000004A0,
+ 0x030, 0x000014A0,
+ 0x030, 0x000024A0,
+ 0x030, 0x000034A0,
+ 0x030, 0x000044A0,
+ 0x030, 0x000054A0,
+ 0x030, 0x000064A0,
+ 0x030, 0x000074A0,
+ 0x030, 0x000084A0,
+ 0x030, 0x000094A0,
+ 0x030, 0x0000A4A0,
+ 0x030, 0x0000B4A0,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x000002D0,
+ 0x030, 0x000012D0,
+ 0x030, 0x000022D0,
+ 0x030, 0x000032D0,
+ 0x030, 0x000042D0,
+ 0x030, 0x000052D0,
+ 0x030, 0x000062D0,
+ 0x030, 0x000072D0,
+ 0x030, 0x000082D0,
+ 0x030, 0x000092D0,
+ 0x030, 0x0000A2D0,
+ 0x030, 0x0000B2D0,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A3,
+ 0x030, 0x000013A3,
+ 0x030, 0x000023A3,
+ 0x030, 0x000033A3,
+ 0x030, 0x000043A4,
+ 0x030, 0x000053A4,
+ 0x030, 0x000063A4,
+ 0x030, 0x000073A4,
+ 0x030, 0x000083A3,
+ 0x030, 0x000093A3,
+ 0x030, 0x0000A3A3,
+ 0x030, 0x0000B3A3,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000203,
+ 0x030, 0x00001203,
+ 0x030, 0x00002203,
+ 0x030, 0x00003203,
+ 0x030, 0x00004203,
+ 0x030, 0x00005203,
+ 0x030, 0x00006203,
+ 0x030, 0x00007203,
+ 0x030, 0x00008203,
+ 0x030, 0x00009203,
+ 0x030, 0x0000A203,
+ 0x030, 0x0000B203,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x000003A2,
+ 0x030, 0x000013A2,
+ 0x030, 0x000023A2,
+ 0x030, 0x000033A2,
+ 0x030, 0x000043A2,
+ 0x030, 0x000053A2,
+ 0x030, 0x000063A2,
+ 0x030, 0x000073A2,
+ 0x030, 0x000083A2,
+ 0x030, 0x000093A2,
+ 0x030, 0x0000A3A2,
+ 0x030, 0x0000B3A2,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001452,
+ 0x030, 0x00002220,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000660,
+ 0x030, 0x00001341,
+ 0x030, 0x00002220,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000767,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000765,
+ 0x030, 0x00001632,
+ 0x030, 0x00002451,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000776,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000777,
+ 0x030, 0x00001442,
+ 0x030, 0x00002222,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004777,
+ 0x030, 0x00005777,
+ 0x030, 0x00006777,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000775,
+ 0x030, 0x00001422,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000775,
+ 0x030, 0x00001222,
+ 0x030, 0x00002210,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000645,
+ 0x030, 0x00001333,
+ 0x030, 0x00002011,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000764,
+ 0x030, 0x00001632,
+ 0x030, 0x00002421,
+ 0x030, 0x00004000,
+ 0x030, 0x00005000,
+ 0x030, 0x00006000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4E,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6E,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000889,
+ 0x033, 0x00000024,
+ 0x03F, 0x000008AA,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C25,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C28,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C68,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C6B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6E,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000CEA,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000027,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x0000042B,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000082A,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000849,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000084C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF4,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000021,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000022,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000023,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000024,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000026,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000028,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000029,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000C09,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000C90,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000CD0,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000829,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000848,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084B,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000842,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000845,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000866,
+ 0x033, 0x00000063,
+ 0x03F, 0x000008A6,
+ 0x033, 0x00000064,
+ 0x03F, 0x000008C8,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C10,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C4A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CC9,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CEA,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CED,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000429,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000828,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000847,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4E,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8F,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000068,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006B,
+ 0x033, 0x00000067,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000071,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000074,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000077,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x0000042C,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000082B,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000084A,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000084D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C4E,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000C8C,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000C8F,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF5,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000007,
+ 0x033, 0x00000061,
+ 0x03F, 0x0000000A,
+ 0x033, 0x00000062,
+ 0x03F, 0x0000000D,
+ 0x033, 0x00000063,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000064,
+ 0x03F, 0x0000002D,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000030,
+ 0x033, 0x00000066,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000070,
+ 0x033, 0x00000068,
+ 0x03F, 0x000000ED,
+ 0x033, 0x00000069,
+ 0x03F, 0x000000F0,
+ 0x033, 0x0000006A,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000C0A,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000C0D,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000C2A,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000C2D,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000C6A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000CAA,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000CAD,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000CB0,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000CF7,
+ 0xB0000000, 0x00000000,
+ 0x83000000, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x93000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAC,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CED,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x93000008, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000826,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000082C,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000082F,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000086C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF7,
+ 0x93000009, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000a, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000CE5,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF4,
+ 0x9300000d, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000080A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000080D,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000810,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000868,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C68,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6B,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000e, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C08,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0B,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0E,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2E,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C31,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CAB,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAE,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000CEA,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CED,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CF0,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000010, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000011, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000429,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000828,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000847,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C6C,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CAF,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CD1,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF3,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF6,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002B,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000002E,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000031,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000034,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000D1,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000003, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000005,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000008,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000B,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000000E,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000047,
+ 0x033, 0x000000A5,
+ 0x03F, 0x0000004A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000004D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000050,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000053,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000056,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000094,
+ 0x90000005, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x0000042A,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000829,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000848,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000084B,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C4C,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000CEC,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000007,
+ 0x033, 0x000000A1,
+ 0x03F, 0x0000000A,
+ 0x033, 0x000000A2,
+ 0x03F, 0x0000000D,
+ 0x033, 0x000000A3,
+ 0x03F, 0x0000002A,
+ 0x033, 0x000000A4,
+ 0x03F, 0x0000002D,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000030,
+ 0x033, 0x000000A6,
+ 0x03F, 0x0000006D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000070,
+ 0x033, 0x000000A8,
+ 0x03F, 0x000000ED,
+ 0x033, 0x000000A9,
+ 0x03F, 0x000000F0,
+ 0x033, 0x000000AA,
+ 0x03F, 0x000000F3,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x000000A0,
+ 0x03F, 0x00000C09,
+ 0x033, 0x000000A1,
+ 0x03F, 0x00000C0C,
+ 0x033, 0x000000A2,
+ 0x03F, 0x00000C0F,
+ 0x033, 0x000000A3,
+ 0x03F, 0x00000C2C,
+ 0x033, 0x000000A4,
+ 0x03F, 0x00000C2F,
+ 0x033, 0x000000A5,
+ 0x03F, 0x00000C8A,
+ 0x033, 0x000000A6,
+ 0x03F, 0x00000C8D,
+ 0x033, 0x000000A7,
+ 0x03F, 0x00000C90,
+ 0x033, 0x000000A8,
+ 0x03F, 0x00000CEF,
+ 0x033, 0x000000A9,
+ 0x03F, 0x00000CF2,
+ 0x033, 0x000000AA,
+ 0x03F, 0x00000CF5,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000004FB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000004FB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000004FB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000004FB,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000265A,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000265A,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000001,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000002,
+ 0x03F, 0x000004BB,
+ 0x033, 0x00000003,
+ 0x03F, 0x000004BB,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000100,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000745,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000745,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000F34,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000F34,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x081, 0x0000F400,
+ 0x087, 0x00016040,
+ 0x051, 0x00000808,
+ 0x052, 0x00098002,
+ 0x053, 0x0000FA47,
+ 0x054, 0x00058032,
+ 0x056, 0x00051000,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00082030,
+ 0xA0000000, 0x00000000,
+ 0x081, 0x0000F000,
+ 0x087, 0x00016040,
+ 0x051, 0x00000C00,
+ 0x052, 0x0007C241,
+ 0x053, 0x0001C069,
+ 0x054, 0x00078032,
+ 0x057, 0x0000CE0A,
+ 0x058, 0x00058750,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000800,
+ 0x83000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x93000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x93000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x9300000b, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x9300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005142F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00051432,
+ 0x033, 0x00000003,
+ 0x03F, 0x00051CA5,
+ 0x033, 0x00000004,
+ 0x03F, 0x00051CA8,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051CAB,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051CEB,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051CEE,
+ 0x033, 0x00000008,
+ 0x03F, 0x00051CF1,
+ 0x033, 0x00000009,
+ 0x03F, 0x00051CF4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0x9300000f, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005144B,
+ 0x033, 0x00000002,
+ 0x03F, 0x00051868,
+ 0x033, 0x00000003,
+ 0x03F, 0x0005186B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0005186E,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051871,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051874,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051895,
+ 0x033, 0x00000008,
+ 0x03F, 0x000518B6,
+ 0x033, 0x00000009,
+ 0x03F, 0x000518F6,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0x93000012, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000002,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000005,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000008,
+ 0x033, 0x00000003,
+ 0x03F, 0x0000000B,
+ 0x033, 0x00000004,
+ 0x03F, 0x0000000E,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002B,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002E,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000031,
+ 0x033, 0x00000008,
+ 0x03F, 0x0000006E,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000071,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00000074,
+ 0x90000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000004, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000006, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0x90000007, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000003,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000006,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000009,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000026,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000029,
+ 0x033, 0x00000005,
+ 0x03F, 0x0000002C,
+ 0x033, 0x00000006,
+ 0x03F, 0x0000002F,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000033,
+ 0x033, 0x00000008,
+ 0x03F, 0x00000036,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000039,
+ 0x033, 0x0000000A,
+ 0x03F, 0x0000003C,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0005142C,
+ 0x033, 0x00000001,
+ 0x03F, 0x0005142F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00051432,
+ 0x033, 0x00000003,
+ 0x03F, 0x00051C87,
+ 0x033, 0x00000004,
+ 0x03F, 0x00051C8A,
+ 0x033, 0x00000005,
+ 0x03F, 0x00051C8D,
+ 0x033, 0x00000006,
+ 0x03F, 0x00051CEB,
+ 0x033, 0x00000007,
+ 0x03F, 0x00051CEE,
+ 0x033, 0x00000008,
+ 0x03F, 0x00051CF1,
+ 0x033, 0x00000009,
+ 0x03F, 0x00051CF4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x00051CF7,
+ 0xB0000000, 0x00000000,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000010,
+ 0x033, 0x00000000,
+ 0x008, 0x0009C060,
+ 0x033, 0x00000001,
+ 0x008, 0x0009C060,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A2,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x0000593F,
+ 0x8300000c, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x000D0F4F,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x000C0F4F,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x033, 0x000000A3,
+ 0x0EF, 0x00080000,
+ 0x03E, 0x00005934,
+ 0x03F, 0x0005AFCF,
+ 0x0EF, 0x00000000,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822b_rf_b, B);
+
+static const u8 rtw8822b_txpwr_lmt_type2[] = {
+ 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
+ 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
+ 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
+ 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
+ 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
+ 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
+ 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
+ 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
+ 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
+ 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
+ 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
+ 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
+ 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
+ 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
+ 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
+ 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
+ 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
+ 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
+ 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
+ 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
+ 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
+ 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
+ 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
+ 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
+ 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
+ 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
+ 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
+ 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
+ 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
+ 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
+ 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
+ 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
+ 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
+ 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
+ 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
+ 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
+ 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
+ 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
+ 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
+ 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
+ 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
+ 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
+ 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
+ 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
+ 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
+ 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
+ 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
+ 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
+ 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
+ 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
+ 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
+ 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
+ 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
+ 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
+ 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
+ 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
+ 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
+ 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
+ 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
+ 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
+ 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
+ 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
+ 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
+ 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
+ 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
+ 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
+ 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
+ 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
+ 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
+ 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
+ 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
+ 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
+ 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
+ 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
+ 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
+ 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
+ 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
+ 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
+ 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
+ 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
+ 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
+ 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
+ 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
+ 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
+ 0, 1, 0, 1, 36, 36, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
+ 0, 1, 0, 1, 40, 38, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
+ 0, 1, 0, 1, 44, 38, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
+ 0, 1, 0, 1, 48, 38, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
+ 0, 1, 0, 1, 52, 38, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
+ 0, 1, 0, 1, 56, 38, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
+ 0, 1, 0, 1, 60, 38, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
+ 0, 1, 0, 1, 64, 34, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
+ 0, 1, 0, 1, 100, 32, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
+ 0, 1, 0, 1, 104, 38, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
+ 0, 1, 0, 1, 108, 38, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
+ 0, 1, 0, 1, 112, 38, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
+ 0, 1, 0, 1, 116, 38, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
+ 0, 1, 0, 1, 120, 38, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
+ 0, 1, 0, 1, 124, 38, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
+ 0, 1, 0, 1, 128, 38, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
+ 0, 1, 0, 1, 132, 38, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
+ 0, 1, 0, 1, 136, 38, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
+ 0, 1, 0, 1, 140, 34, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
+ 0, 1, 0, 1, 144, 34, 2, 1, 0, 1, 144, 32, 1, 1, 0, 1, 144, 63,
+ 0, 1, 0, 1, 149, 38, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
+ 0, 1, 0, 1, 153, 38, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
+ 0, 1, 0, 1, 157, 38, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
+ 0, 1, 0, 1, 161, 38, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
+ 0, 1, 0, 1, 165, 38, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
+ 0, 1, 0, 2, 36, 36, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
+ 0, 1, 0, 2, 40, 38, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
+ 0, 1, 0, 2, 44, 38, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
+ 0, 1, 0, 2, 48, 38, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
+ 0, 1, 0, 2, 52, 38, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
+ 0, 1, 0, 2, 56, 38, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
+ 0, 1, 0, 2, 60, 38, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
+ 0, 1, 0, 2, 64, 34, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
+ 0, 1, 0, 2, 100, 32, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
+ 0, 1, 0, 2, 104, 38, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
+ 0, 1, 0, 2, 108, 38, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
+ 0, 1, 0, 2, 112, 38, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
+ 0, 1, 0, 2, 116, 38, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
+ 0, 1, 0, 2, 120, 38, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
+ 0, 1, 0, 2, 124, 38, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
+ 0, 1, 0, 2, 128, 38, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
+ 0, 1, 0, 2, 132, 38, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
+ 0, 1, 0, 2, 136, 38, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
+ 0, 1, 0, 2, 140, 32, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
+ 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
+ 0, 1, 0, 2, 149, 38, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
+ 0, 1, 0, 2, 153, 38, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
+ 0, 1, 0, 2, 157, 38, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
+ 0, 1, 0, 2, 161, 38, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
+ 0, 1, 0, 2, 165, 38, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
+ 0, 1, 0, 3, 36, 34, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
+ 0, 1, 0, 3, 40, 36, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
+ 0, 1, 0, 3, 44, 36, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
+ 0, 1, 0, 3, 48, 36, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
+ 0, 1, 0, 3, 52, 36, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
+ 0, 1, 0, 3, 56, 36, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
+ 0, 1, 0, 3, 60, 36, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
+ 0, 1, 0, 3, 64, 34, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
+ 0, 1, 0, 3, 100, 32, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
+ 0, 1, 0, 3, 104, 36, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
+ 0, 1, 0, 3, 108, 38, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
+ 0, 1, 0, 3, 112, 38, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
+ 0, 1, 0, 3, 116, 38, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
+ 0, 1, 0, 3, 120, 38, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
+ 0, 1, 0, 3, 124, 38, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
+ 0, 1, 0, 3, 128, 38, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
+ 0, 1, 0, 3, 132, 38, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
+ 0, 1, 0, 3, 136, 36, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
+ 0, 1, 0, 3, 140, 32, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
+ 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
+ 0, 1, 0, 3, 149, 38, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
+ 0, 1, 0, 3, 153, 38, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
+ 0, 1, 0, 3, 157, 38, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
+ 0, 1, 0, 3, 161, 38, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
+ 0, 1, 0, 3, 165, 38, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
+ 0, 1, 1, 2, 38, 28, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
+ 0, 1, 1, 2, 46, 36, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
+ 0, 1, 1, 2, 54, 36, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
+ 0, 1, 1, 2, 62, 30, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
+ 0, 1, 1, 2, 102, 30, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
+ 0, 1, 1, 2, 110, 36, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
+ 0, 1, 1, 2, 118, 36, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
+ 0, 1, 1, 2, 126, 36, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
+ 0, 1, 1, 2, 134, 36, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
+ 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
+ 0, 1, 1, 2, 151, 36, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
+ 0, 1, 1, 2, 159, 36, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
+ 0, 1, 1, 3, 38, 26, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
+ 0, 1, 1, 3, 46, 36, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
+ 0, 1, 1, 3, 54, 36, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
+ 0, 1, 1, 3, 62, 28, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
+ 0, 1, 1, 3, 102, 28, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
+ 0, 1, 1, 3, 110, 36, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
+ 0, 1, 1, 3, 118, 36, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
+ 0, 1, 1, 3, 126, 36, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
+ 0, 1, 1, 3, 134, 36, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
+ 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
+ 0, 1, 1, 3, 151, 36, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
+ 0, 1, 1, 3, 159, 36, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
+ 0, 1, 2, 4, 42, 26, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
+ 0, 1, 2, 4, 58, 26, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
+ 0, 1, 2, 4, 106, 26, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
+ 0, 1, 2, 4, 122, 36, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
+ 0, 1, 2, 4, 138, 36, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
+ 0, 1, 2, 4, 155, 36, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
+ 0, 1, 2, 5, 42, 24, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
+ 0, 1, 2, 5, 58, 24, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
+ 0, 1, 2, 5, 106, 26, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
+ 0, 1, 2, 5, 122, 36, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
+ 0, 1, 2, 5, 138, 36, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
+ 0, 1, 2, 5, 155, 36, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type2);
+
+static const u8 rtw8822b_txpwr_lmt_type5[] = {
+ 0, 0, 0, 0, 1, 32, 2, 0, 0, 0, 1, 28, 1, 0, 0, 0, 1, 30,
+ 0, 0, 0, 0, 2, 32, 2, 0, 0, 0, 2, 28, 1, 0, 0, 0, 2, 30,
+ 0, 0, 0, 0, 3, 32, 2, 0, 0, 0, 3, 28, 1, 0, 0, 0, 3, 30,
+ 0, 0, 0, 0, 4, 32, 2, 0, 0, 0, 4, 28, 1, 0, 0, 0, 4, 30,
+ 0, 0, 0, 0, 5, 32, 2, 0, 0, 0, 5, 28, 1, 0, 0, 0, 5, 30,
+ 0, 0, 0, 0, 6, 32, 2, 0, 0, 0, 6, 28, 1, 0, 0, 0, 6, 30,
+ 0, 0, 0, 0, 7, 32, 2, 0, 0, 0, 7, 28, 1, 0, 0, 0, 7, 30,
+ 0, 0, 0, 0, 8, 32, 2, 0, 0, 0, 8, 28, 1, 0, 0, 0, 8, 30,
+ 0, 0, 0, 0, 9, 32, 2, 0, 0, 0, 9, 28, 1, 0, 0, 0, 9, 30,
+ 0, 0, 0, 0, 10, 32, 2, 0, 0, 0, 10, 28, 1, 0, 0, 0, 10, 30,
+ 0, 0, 0, 0, 11, 32, 2, 0, 0, 0, 11, 28, 1, 0, 0, 0, 11, 30,
+ 0, 0, 0, 0, 12, 26, 2, 0, 0, 0, 12, 28, 1, 0, 0, 0, 12, 30,
+ 0, 0, 0, 0, 13, 20, 2, 0, 0, 0, 13, 28, 1, 0, 0, 0, 13, 28,
+ 0, 0, 0, 0, 14, 63, 2, 0, 0, 0, 14, 63, 1, 0, 0, 0, 14, 32,
+ 0, 0, 0, 1, 1, 26, 2, 0, 0, 1, 1, 30, 1, 0, 0, 1, 1, 34,
+ 0, 0, 0, 1, 2, 30, 2, 0, 0, 1, 2, 30, 1, 0, 0, 1, 2, 34,
+ 0, 0, 0, 1, 3, 32, 2, 0, 0, 1, 3, 30, 1, 0, 0, 1, 3, 34,
+ 0, 0, 0, 1, 4, 34, 2, 0, 0, 1, 4, 30, 1, 0, 0, 1, 4, 34,
+ 0, 0, 0, 1, 5, 34, 2, 0, 0, 1, 5, 30, 1, 0, 0, 1, 5, 34,
+ 0, 0, 0, 1, 6, 34, 2, 0, 0, 1, 6, 30, 1, 0, 0, 1, 6, 34,
+ 0, 0, 0, 1, 7, 34, 2, 0, 0, 1, 7, 30, 1, 0, 0, 1, 7, 34,
+ 0, 0, 0, 1, 8, 34, 2, 0, 0, 1, 8, 30, 1, 0, 0, 1, 8, 34,
+ 0, 0, 0, 1, 9, 32, 2, 0, 0, 1, 9, 30, 1, 0, 0, 1, 9, 34,
+ 0, 0, 0, 1, 10, 30, 2, 0, 0, 1, 10, 30, 1, 0, 0, 1, 10, 34,
+ 0, 0, 0, 1, 11, 28, 2, 0, 0, 1, 11, 30, 1, 0, 0, 1, 11, 34,
+ 0, 0, 0, 1, 12, 22, 2, 0, 0, 1, 12, 30, 1, 0, 0, 1, 12, 34,
+ 0, 0, 0, 1, 13, 14, 2, 0, 0, 1, 13, 30, 1, 0, 0, 1, 13, 34,
+ 0, 0, 0, 1, 14, 63, 2, 0, 0, 1, 14, 63, 1, 0, 0, 1, 14, 63,
+ 0, 0, 0, 2, 1, 26, 2, 0, 0, 2, 1, 30, 1, 0, 0, 2, 1, 34,
+ 0, 0, 0, 2, 2, 30, 2, 0, 0, 2, 2, 30, 1, 0, 0, 2, 2, 34,
+ 0, 0, 0, 2, 3, 32, 2, 0, 0, 2, 3, 30, 1, 0, 0, 2, 3, 34,
+ 0, 0, 0, 2, 4, 34, 2, 0, 0, 2, 4, 30, 1, 0, 0, 2, 4, 34,
+ 0, 0, 0, 2, 5, 34, 2, 0, 0, 2, 5, 30, 1, 0, 0, 2, 5, 34,
+ 0, 0, 0, 2, 6, 34, 2, 0, 0, 2, 6, 30, 1, 0, 0, 2, 6, 34,
+ 0, 0, 0, 2, 7, 34, 2, 0, 0, 2, 7, 30, 1, 0, 0, 2, 7, 34,
+ 0, 0, 0, 2, 8, 34, 2, 0, 0, 2, 8, 30, 1, 0, 0, 2, 8, 34,
+ 0, 0, 0, 2, 9, 32, 2, 0, 0, 2, 9, 30, 1, 0, 0, 2, 9, 34,
+ 0, 0, 0, 2, 10, 30, 2, 0, 0, 2, 10, 30, 1, 0, 0, 2, 10, 34,
+ 0, 0, 0, 2, 11, 26, 2, 0, 0, 2, 11, 30, 1, 0, 0, 2, 11, 34,
+ 0, 0, 0, 2, 12, 20, 2, 0, 0, 2, 12, 30, 1, 0, 0, 2, 12, 34,
+ 0, 0, 0, 2, 13, 14, 2, 0, 0, 2, 13, 30, 1, 0, 0, 2, 13, 34,
+ 0, 0, 0, 2, 14, 63, 2, 0, 0, 2, 14, 63, 1, 0, 0, 2, 14, 63,
+ 0, 0, 0, 3, 1, 26, 2, 0, 0, 3, 1, 18, 1, 0, 0, 3, 1, 30,
+ 0, 0, 0, 3, 2, 28, 2, 0, 0, 3, 2, 18, 1, 0, 0, 3, 2, 30,
+ 0, 0, 0, 3, 3, 30, 2, 0, 0, 3, 3, 18, 1, 0, 0, 3, 3, 30,
+ 0, 0, 0, 3, 4, 30, 2, 0, 0, 3, 4, 18, 1, 0, 0, 3, 4, 30,
+ 0, 0, 0, 3, 5, 32, 2, 0, 0, 3, 5, 18, 1, 0, 0, 3, 5, 30,
+ 0, 0, 0, 3, 6, 32, 2, 0, 0, 3, 6, 18, 1, 0, 0, 3, 6, 30,
+ 0, 0, 0, 3, 7, 32, 2, 0, 0, 3, 7, 18, 1, 0, 0, 3, 7, 30,
+ 0, 0, 0, 3, 8, 30, 2, 0, 0, 3, 8, 18, 1, 0, 0, 3, 8, 30,
+ 0, 0, 0, 3, 9, 30, 2, 0, 0, 3, 9, 18, 1, 0, 0, 3, 9, 30,
+ 0, 0, 0, 3, 10, 28, 2, 0, 0, 3, 10, 18, 1, 0, 0, 3, 10, 30,
+ 0, 0, 0, 3, 11, 26, 2, 0, 0, 3, 11, 18, 1, 0, 0, 3, 11, 30,
+ 0, 0, 0, 3, 12, 20, 2, 0, 0, 3, 12, 18, 1, 0, 0, 3, 12, 30,
+ 0, 0, 0, 3, 13, 14, 2, 0, 0, 3, 13, 18, 1, 0, 0, 3, 13, 30,
+ 0, 0, 0, 3, 14, 63, 2, 0, 0, 3, 14, 63, 1, 0, 0, 3, 14, 63,
+ 0, 0, 1, 2, 1, 63, 2, 0, 1, 2, 1, 63, 1, 0, 1, 2, 1, 63,
+ 0, 0, 1, 2, 2, 63, 2, 0, 1, 2, 2, 63, 1, 0, 1, 2, 2, 63,
+ 0, 0, 1, 2, 3, 26, 2, 0, 1, 2, 3, 30, 1, 0, 1, 2, 3, 34,
+ 0, 0, 1, 2, 4, 26, 2, 0, 1, 2, 4, 30, 1, 0, 1, 2, 4, 34,
+ 0, 0, 1, 2, 5, 30, 2, 0, 1, 2, 5, 30, 1, 0, 1, 2, 5, 34,
+ 0, 0, 1, 2, 6, 32, 2, 0, 1, 2, 6, 30, 1, 0, 1, 2, 6, 34,
+ 0, 0, 1, 2, 7, 30, 2, 0, 1, 2, 7, 30, 1, 0, 1, 2, 7, 34,
+ 0, 0, 1, 2, 8, 26, 2, 0, 1, 2, 8, 30, 1, 0, 1, 2, 8, 34,
+ 0, 0, 1, 2, 9, 26, 2, 0, 1, 2, 9, 30, 1, 0, 1, 2, 9, 34,
+ 0, 0, 1, 2, 10, 20, 2, 0, 1, 2, 10, 30, 1, 0, 1, 2, 10, 34,
+ 0, 0, 1, 2, 11, 14, 2, 0, 1, 2, 11, 30, 1, 0, 1, 2, 11, 34,
+ 0, 0, 1, 2, 12, 63, 2, 0, 1, 2, 12, 63, 1, 0, 1, 2, 12, 63,
+ 0, 0, 1, 2, 13, 63, 2, 0, 1, 2, 13, 63, 1, 0, 1, 2, 13, 63,
+ 0, 0, 1, 2, 14, 63, 2, 0, 1, 2, 14, 63, 1, 0, 1, 2, 14, 63,
+ 0, 0, 1, 3, 1, 63, 2, 0, 1, 3, 1, 63, 1, 0, 1, 3, 1, 63,
+ 0, 0, 1, 3, 2, 63, 2, 0, 1, 3, 2, 63, 1, 0, 1, 3, 2, 63,
+ 0, 0, 1, 3, 3, 24, 2, 0, 1, 3, 3, 18, 1, 0, 1, 3, 3, 30,
+ 0, 0, 1, 3, 4, 24, 2, 0, 1, 3, 4, 18, 1, 0, 1, 3, 4, 30,
+ 0, 0, 1, 3, 5, 26, 2, 0, 1, 3, 5, 18, 1, 0, 1, 3, 5, 30,
+ 0, 0, 1, 3, 6, 28, 2, 0, 1, 3, 6, 18, 1, 0, 1, 3, 6, 30,
+ 0, 0, 1, 3, 7, 26, 2, 0, 1, 3, 7, 18, 1, 0, 1, 3, 7, 30,
+ 0, 0, 1, 3, 8, 26, 2, 0, 1, 3, 8, 18, 1, 0, 1, 3, 8, 30,
+ 0, 0, 1, 3, 9, 26, 2, 0, 1, 3, 9, 18, 1, 0, 1, 3, 9, 30,
+ 0, 0, 1, 3, 10, 20, 2, 0, 1, 3, 10, 18, 1, 0, 1, 3, 10, 30,
+ 0, 0, 1, 3, 11, 14, 2, 0, 1, 3, 11, 18, 1, 0, 1, 3, 11, 30,
+ 0, 0, 1, 3, 12, 63, 2, 0, 1, 3, 12, 63, 1, 0, 1, 3, 12, 63,
+ 0, 0, 1, 3, 13, 63, 2, 0, 1, 3, 13, 63, 1, 0, 1, 3, 13, 63,
+ 0, 0, 1, 3, 14, 63, 2, 0, 1, 3, 14, 63, 1, 0, 1, 3, 14, 63,
+ 0, 1, 0, 1, 36, 30, 2, 1, 0, 1, 36, 32, 1, 1, 0, 1, 36, 30,
+ 0, 1, 0, 1, 40, 32, 2, 1, 0, 1, 40, 32, 1, 1, 0, 1, 40, 30,
+ 0, 1, 0, 1, 44, 32, 2, 1, 0, 1, 44, 32, 1, 1, 0, 1, 44, 30,
+ 0, 1, 0, 1, 48, 32, 2, 1, 0, 1, 48, 32, 1, 1, 0, 1, 48, 30,
+ 0, 1, 0, 1, 52, 32, 2, 1, 0, 1, 52, 32, 1, 1, 0, 1, 52, 28,
+ 0, 1, 0, 1, 56, 32, 2, 1, 0, 1, 56, 32, 1, 1, 0, 1, 56, 28,
+ 0, 1, 0, 1, 60, 32, 2, 1, 0, 1, 60, 32, 1, 1, 0, 1, 60, 28,
+ 0, 1, 0, 1, 64, 28, 2, 1, 0, 1, 64, 32, 1, 1, 0, 1, 64, 28,
+ 0, 1, 0, 1, 100, 26, 2, 1, 0, 1, 100, 32, 1, 1, 0, 1, 100, 32,
+ 0, 1, 0, 1, 104, 32, 2, 1, 0, 1, 104, 32, 1, 1, 0, 1, 104, 32,
+ 0, 1, 0, 1, 108, 32, 2, 1, 0, 1, 108, 32, 1, 1, 0, 1, 108, 32,
+ 0, 1, 0, 1, 112, 32, 2, 1, 0, 1, 112, 32, 1, 1, 0, 1, 112, 32,
+ 0, 1, 0, 1, 116, 32, 2, 1, 0, 1, 116, 32, 1, 1, 0, 1, 116, 32,
+ 0, 1, 0, 1, 120, 32, 2, 1, 0, 1, 120, 32, 1, 1, 0, 1, 120, 32,
+ 0, 1, 0, 1, 124, 32, 2, 1, 0, 1, 124, 32, 1, 1, 0, 1, 124, 32,
+ 0, 1, 0, 1, 128, 32, 2, 1, 0, 1, 128, 32, 1, 1, 0, 1, 128, 32,
+ 0, 1, 0, 1, 132, 32, 2, 1, 0, 1, 132, 32, 1, 1, 0, 1, 132, 32,
+ 0, 1, 0, 1, 136, 32, 2, 1, 0, 1, 136, 32, 1, 1, 0, 1, 136, 32,
+ 0, 1, 0, 1, 140, 28, 2, 1, 0, 1, 140, 32, 1, 1, 0, 1, 140, 32,
+ 0, 1, 0, 1, 144, 28, 2, 1, 0, 1, 144, 63, 1, 1, 0, 1, 144, 63,
+ 0, 1, 0, 1, 149, 32, 2, 1, 0, 1, 149, 63, 1, 1, 0, 1, 149, 63,
+ 0, 1, 0, 1, 153, 32, 2, 1, 0, 1, 153, 63, 1, 1, 0, 1, 153, 63,
+ 0, 1, 0, 1, 157, 32, 2, 1, 0, 1, 157, 63, 1, 1, 0, 1, 157, 63,
+ 0, 1, 0, 1, 161, 32, 2, 1, 0, 1, 161, 63, 1, 1, 0, 1, 161, 63,
+ 0, 1, 0, 1, 165, 32, 2, 1, 0, 1, 165, 63, 1, 1, 0, 1, 165, 63,
+ 0, 1, 0, 2, 36, 30, 2, 1, 0, 2, 36, 32, 1, 1, 0, 2, 36, 28,
+ 0, 1, 0, 2, 40, 32, 2, 1, 0, 2, 40, 32, 1, 1, 0, 2, 40, 28,
+ 0, 1, 0, 2, 44, 32, 2, 1, 0, 2, 44, 32, 1, 1, 0, 2, 44, 28,
+ 0, 1, 0, 2, 48, 32, 2, 1, 0, 2, 48, 32, 1, 1, 0, 2, 48, 28,
+ 0, 1, 0, 2, 52, 32, 2, 1, 0, 2, 52, 32, 1, 1, 0, 2, 52, 28,
+ 0, 1, 0, 2, 56, 32, 2, 1, 0, 2, 56, 32, 1, 1, 0, 2, 56, 28,
+ 0, 1, 0, 2, 60, 32, 2, 1, 0, 2, 60, 32, 1, 1, 0, 2, 60, 28,
+ 0, 1, 0, 2, 64, 28, 2, 1, 0, 2, 64, 32, 1, 1, 0, 2, 64, 28,
+ 0, 1, 0, 2, 100, 26, 2, 1, 0, 2, 100, 32, 1, 1, 0, 2, 100, 32,
+ 0, 1, 0, 2, 104, 32, 2, 1, 0, 2, 104, 32, 1, 1, 0, 2, 104, 32,
+ 0, 1, 0, 2, 108, 32, 2, 1, 0, 2, 108, 32, 1, 1, 0, 2, 108, 32,
+ 0, 1, 0, 2, 112, 32, 2, 1, 0, 2, 112, 32, 1, 1, 0, 2, 112, 32,
+ 0, 1, 0, 2, 116, 32, 2, 1, 0, 2, 116, 32, 1, 1, 0, 2, 116, 32,
+ 0, 1, 0, 2, 120, 32, 2, 1, 0, 2, 120, 32, 1, 1, 0, 2, 120, 32,
+ 0, 1, 0, 2, 124, 32, 2, 1, 0, 2, 124, 32, 1, 1, 0, 2, 124, 32,
+ 0, 1, 0, 2, 128, 32, 2, 1, 0, 2, 128, 32, 1, 1, 0, 2, 128, 32,
+ 0, 1, 0, 2, 132, 32, 2, 1, 0, 2, 132, 32, 1, 1, 0, 2, 132, 32,
+ 0, 1, 0, 2, 136, 32, 2, 1, 0, 2, 136, 32, 1, 1, 0, 2, 136, 32,
+ 0, 1, 0, 2, 140, 26, 2, 1, 0, 2, 140, 32, 1, 1, 0, 2, 140, 32,
+ 0, 1, 0, 2, 144, 26, 2, 1, 0, 2, 144, 63, 1, 1, 0, 2, 144, 63,
+ 0, 1, 0, 2, 149, 32, 2, 1, 0, 2, 149, 63, 1, 1, 0, 2, 149, 63,
+ 0, 1, 0, 2, 153, 32, 2, 1, 0, 2, 153, 63, 1, 1, 0, 2, 153, 63,
+ 0, 1, 0, 2, 157, 32, 2, 1, 0, 2, 157, 63, 1, 1, 0, 2, 157, 63,
+ 0, 1, 0, 2, 161, 32, 2, 1, 0, 2, 161, 63, 1, 1, 0, 2, 161, 63,
+ 0, 1, 0, 2, 165, 32, 2, 1, 0, 2, 165, 63, 1, 1, 0, 2, 165, 63,
+ 0, 1, 0, 3, 36, 28, 2, 1, 0, 3, 36, 20, 1, 1, 0, 3, 36, 22,
+ 0, 1, 0, 3, 40, 30, 2, 1, 0, 3, 40, 20, 1, 1, 0, 3, 40, 22,
+ 0, 1, 0, 3, 44, 30, 2, 1, 0, 3, 44, 20, 1, 1, 0, 3, 44, 22,
+ 0, 1, 0, 3, 48, 30, 2, 1, 0, 3, 48, 20, 1, 1, 0, 3, 48, 22,
+ 0, 1, 0, 3, 52, 30, 2, 1, 0, 3, 52, 20, 1, 1, 0, 3, 52, 22,
+ 0, 1, 0, 3, 56, 30, 2, 1, 0, 3, 56, 20, 1, 1, 0, 3, 56, 22,
+ 0, 1, 0, 3, 60, 30, 2, 1, 0, 3, 60, 20, 1, 1, 0, 3, 60, 22,
+ 0, 1, 0, 3, 64, 28, 2, 1, 0, 3, 64, 20, 1, 1, 0, 3, 64, 22,
+ 0, 1, 0, 3, 100, 26, 2, 1, 0, 3, 100, 20, 1, 1, 0, 3, 100, 30,
+ 0, 1, 0, 3, 104, 30, 2, 1, 0, 3, 104, 20, 1, 1, 0, 3, 104, 30,
+ 0, 1, 0, 3, 108, 32, 2, 1, 0, 3, 108, 20, 1, 1, 0, 3, 108, 30,
+ 0, 1, 0, 3, 112, 32, 2, 1, 0, 3, 112, 20, 1, 1, 0, 3, 112, 30,
+ 0, 1, 0, 3, 116, 32, 2, 1, 0, 3, 116, 20, 1, 1, 0, 3, 116, 30,
+ 0, 1, 0, 3, 120, 32, 2, 1, 0, 3, 120, 20, 1, 1, 0, 3, 120, 30,
+ 0, 1, 0, 3, 124, 32, 2, 1, 0, 3, 124, 20, 1, 1, 0, 3, 124, 30,
+ 0, 1, 0, 3, 128, 32, 2, 1, 0, 3, 128, 20, 1, 1, 0, 3, 128, 30,
+ 0, 1, 0, 3, 132, 32, 2, 1, 0, 3, 132, 20, 1, 1, 0, 3, 132, 30,
+ 0, 1, 0, 3, 136, 30, 2, 1, 0, 3, 136, 20, 1, 1, 0, 3, 136, 30,
+ 0, 1, 0, 3, 140, 26, 2, 1, 0, 3, 140, 20, 1, 1, 0, 3, 140, 30,
+ 0, 1, 0, 3, 144, 26, 2, 1, 0, 3, 144, 63, 1, 1, 0, 3, 144, 63,
+ 0, 1, 0, 3, 149, 32, 2, 1, 0, 3, 149, 63, 1, 1, 0, 3, 149, 63,
+ 0, 1, 0, 3, 153, 32, 2, 1, 0, 3, 153, 63, 1, 1, 0, 3, 153, 63,
+ 0, 1, 0, 3, 157, 32, 2, 1, 0, 3, 157, 63, 1, 1, 0, 3, 157, 63,
+ 0, 1, 0, 3, 161, 32, 2, 1, 0, 3, 161, 63, 1, 1, 0, 3, 161, 63,
+ 0, 1, 0, 3, 165, 32, 2, 1, 0, 3, 165, 63, 1, 1, 0, 3, 165, 63,
+ 0, 1, 1, 2, 38, 22, 2, 1, 1, 2, 38, 30, 1, 1, 1, 2, 38, 30,
+ 0, 1, 1, 2, 46, 30, 2, 1, 1, 2, 46, 30, 1, 1, 1, 2, 46, 30,
+ 0, 1, 1, 2, 54, 30, 2, 1, 1, 2, 54, 30, 1, 1, 1, 2, 54, 30,
+ 0, 1, 1, 2, 62, 24, 2, 1, 1, 2, 62, 30, 1, 1, 1, 2, 62, 30,
+ 0, 1, 1, 2, 102, 24, 2, 1, 1, 2, 102, 30, 1, 1, 1, 2, 102, 30,
+ 0, 1, 1, 2, 110, 30, 2, 1, 1, 2, 110, 30, 1, 1, 1, 2, 110, 30,
+ 0, 1, 1, 2, 118, 30, 2, 1, 1, 2, 118, 30, 1, 1, 1, 2, 118, 30,
+ 0, 1, 1, 2, 126, 30, 2, 1, 1, 2, 126, 30, 1, 1, 1, 2, 126, 30,
+ 0, 1, 1, 2, 134, 30, 2, 1, 1, 2, 134, 30, 1, 1, 1, 2, 134, 30,
+ 0, 1, 1, 2, 142, 30, 2, 1, 1, 2, 142, 63, 1, 1, 1, 2, 142, 63,
+ 0, 1, 1, 2, 151, 30, 2, 1, 1, 2, 151, 63, 1, 1, 1, 2, 151, 63,
+ 0, 1, 1, 2, 159, 30, 2, 1, 1, 2, 159, 63, 1, 1, 1, 2, 159, 63,
+ 0, 1, 1, 3, 38, 20, 2, 1, 1, 3, 38, 20, 1, 1, 1, 3, 38, 22,
+ 0, 1, 1, 3, 46, 30, 2, 1, 1, 3, 46, 20, 1, 1, 1, 3, 46, 22,
+ 0, 1, 1, 3, 54, 30, 2, 1, 1, 3, 54, 20, 1, 1, 1, 3, 54, 22,
+ 0, 1, 1, 3, 62, 22, 2, 1, 1, 3, 62, 20, 1, 1, 1, 3, 62, 22,
+ 0, 1, 1, 3, 102, 22, 2, 1, 1, 3, 102, 20, 1, 1, 1, 3, 102, 30,
+ 0, 1, 1, 3, 110, 30, 2, 1, 1, 3, 110, 20, 1, 1, 1, 3, 110, 30,
+ 0, 1, 1, 3, 118, 30, 2, 1, 1, 3, 118, 20, 1, 1, 1, 3, 118, 30,
+ 0, 1, 1, 3, 126, 30, 2, 1, 1, 3, 126, 20, 1, 1, 1, 3, 126, 30,
+ 0, 1, 1, 3, 134, 30, 2, 1, 1, 3, 134, 20, 1, 1, 1, 3, 134, 30,
+ 0, 1, 1, 3, 142, 30, 2, 1, 1, 3, 142, 63, 1, 1, 1, 3, 142, 63,
+ 0, 1, 1, 3, 151, 30, 2, 1, 1, 3, 151, 63, 1, 1, 1, 3, 151, 63,
+ 0, 1, 1, 3, 159, 30, 2, 1, 1, 3, 159, 63, 1, 1, 1, 3, 159, 63,
+ 0, 1, 2, 4, 42, 20, 2, 1, 2, 4, 42, 30, 1, 1, 2, 4, 42, 28,
+ 0, 1, 2, 4, 58, 20, 2, 1, 2, 4, 58, 30, 1, 1, 2, 4, 58, 28,
+ 0, 1, 2, 4, 106, 20, 2, 1, 2, 4, 106, 30, 1, 1, 2, 4, 106, 30,
+ 0, 1, 2, 4, 122, 30, 2, 1, 2, 4, 122, 30, 1, 1, 2, 4, 122, 30,
+ 0, 1, 2, 4, 138, 30, 2, 1, 2, 4, 138, 63, 1, 1, 2, 4, 138, 63,
+ 0, 1, 2, 4, 155, 30, 2, 1, 2, 4, 155, 63, 1, 1, 2, 4, 155, 63,
+ 0, 1, 2, 5, 42, 18, 2, 1, 2, 5, 42, 20, 1, 1, 2, 5, 42, 22,
+ 0, 1, 2, 5, 58, 18, 2, 1, 2, 5, 58, 20, 1, 1, 2, 5, 58, 22,
+ 0, 1, 2, 5, 106, 20, 2, 1, 2, 5, 106, 20, 1, 1, 2, 5, 106, 30,
+ 0, 1, 2, 5, 122, 30, 2, 1, 2, 5, 122, 20, 1, 1, 2, 5, 122, 30,
+ 0, 1, 2, 5, 138, 30, 2, 1, 2, 5, 138, 63, 1, 1, 2, 5, 138, 63,
+ 0, 1, 2, 5, 155, 30, 2, 1, 2, 5, 155, 63, 1, 1, 2, 5, 155, 63,
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822b_txpwr_lmt_type5);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
new file mode 100644
index 000000000000..d4c268889368
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b_table.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822B_TABLE_H__
+#define __RTW8822B_TABLE_H__
+
+extern const struct rtw_table rtw8822b_mac_tbl;
+extern const struct rtw_table rtw8822b_agc_tbl;
+extern const struct rtw_table rtw8822b_bb_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type2_tbl;
+extern const struct rtw_table rtw8822b_bb_pg_type5_tbl;
+extern const struct rtw_table rtw8822b_rf_a_tbl;
+extern const struct rtw_table rtw8822b_rf_b_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type2_tbl;
+extern const struct rtw_table rtw8822b_txpwr_lmt_type5_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
new file mode 100644
index 000000000000..b4f7242e5aa3
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -0,0 +1,1890 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "fw.h"
+#include "tx.h"
+#include "rx.h"
+#include "phy.h"
+#include "rtw8822c.h"
+#include "rtw8822c_table.h"
+#include "mac.h"
+#include "reg.h"
+#include "debug.h"
+
+static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path);
+
+static void rtw8822ce_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8822c_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
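+/* Copy the logical efuse map into the driver's efuse fields; the
+ * interface-specific MAC address is only parsed for the PCIe variant for now.
+ */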
+static int rtw8822c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8822c_efuse *map;
+ int i;
+
+ map = (struct rtw8822c_efuse *)log_map;
+
+ efuse->rfe_option = map->rfe_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8822ce_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
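+/* Enable 3-wire PI access on both RF paths; the CCK/OFDM block is gated off
+ * before the parameter tables are loaded (pre) and re-enabled afterwards.
+ */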
+static void rtw8822c_header_file_init(struct rtw_dev *rtwdev, bool pre)
+{
+ rtw_write32_set(rtwdev, REG_3WIRE, BIT_3WIRE_TX_EN | BIT_3WIRE_RX_EN);
+ rtw_write32_set(rtwdev, REG_3WIRE, BIT_3WIRE_PI_ON);
+ rtw_write32_set(rtwdev, REG_3WIRE2, BIT_3WIRE_TX_EN | BIT_3WIRE_RX_EN);
+ rtw_write32_set(rtwdev, REG_3WIRE2, BIT_3WIRE_PI_ON);
+
+ if (pre)
+ rtw_write32_clr(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
+ else
+ rtw_write32_set(rtwdev, REG_ENCCK, BIT_CCK_OFDM_BLK_EN);
+}
+
+static void rtw8822c_dac_backup_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *backup,
+ struct rtw_backup_info *backup_rf)
+{
+ u32 path, i;
+ u32 val;
+ u32 reg;
+ u32 rf_addr[DACK_RF_8822C] = {0x8f};
+ u32 addrs[DACK_REG_8822C] = {0x180c, 0x1810, 0x410c, 0x4110,
+ 0x1c3c, 0x1c24, 0x1d70, 0x9b4,
+ 0x1a00, 0x1a14, 0x1d58, 0x1c38,
+ 0x1e24, 0x1e28, 0x1860, 0x4160};
+
+ for (i = 0; i < DACK_REG_8822C; i++) {
+ backup[i].len = 4;
+ backup[i].reg = addrs[i];
+ backup[i].val = rtw_read32(rtwdev, addrs[i]);
+ }
+
+ for (path = 0; path < DACK_PATH_8822C; path++) {
+ for (i = 0; i < DACK_RF_8822C; i++) {
+ reg = rf_addr[i];
+ val = rtw_read_rf(rtwdev, path, reg, RFREG_MASK);
+ backup_rf[path * DACK_RF_8822C + i].reg = reg;
+ backup_rf[path * DACK_RF_8822C + i].val = val;
+ }
+ }
+}
+
+static void rtw8822c_dac_restore_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *backup,
+ struct rtw_backup_info *backup_rf)
+{
+ u32 path, i;
+ u32 val;
+ u32 reg;
+
+ rtw_restore_reg(rtwdev, backup, DACK_REG_8822C);
+
+ for (path = 0; path < DACK_PATH_8822C; path++) {
+ for (i = 0; i < DACK_RF_8822C; i++) {
+ val = backup_rf[path * DACK_RF_8822C + i].val;
+ reg = backup_rf[path * DACK_RF_8822C + i].reg;
+ rtw_write_rf(rtwdev, path, reg, RFREG_MASK, val);
+ }
+ }
+}
+
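+/* Track the signed min/max of 10-bit samples; values >= 0x200 are negative
+ * (two's complement) and are compared separately from the positive range.
+ */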
+static void rtw8822c_rf_minmax_cmp(struct rtw_dev *rtwdev, u32 value,
+ u32 *min, u32 *max)
+{
+ if (value >= 0x200) {
+ if (*min >= 0x200) {
+ if (*min > value)
+ *min = value;
+ } else {
+ *min = value;
+ }
+ if (*max >= 0x200) {
+ if (*max < value)
+ *max = value;
+ }
+ } else {
+ if (*min < 0x200) {
+ if (*min > value)
+ *min = value;
+ }
+
+ if (*max >= 0x200) {
+ *max = value;
+ } else {
+ if (*max < value)
+ *max = value;
+ }
+ }
+}
+
+static void swap_u32(u32 *v1, u32 *v2)
+{
+ u32 tmp;
+
+ tmp = *v1;
+ *v1 = *v2;
+ *v2 = tmp;
+}
+
+static void __rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *v1, u32 *v2)
+{
+ if (*v1 >= 0x200 && *v2 >= 0x200) {
+ if (*v1 > *v2)
+ swap_u32(v1, v2);
+ } else if (*v1 < 0x200 && *v2 < 0x200) {
+ if (*v1 > *v2)
+ swap_u32(v1, v2);
+ } else if (*v1 < 0x200 && *v2 >= 0x200) {
+ swap_u32(v1, v2);
+ }
+}
+
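+/* Bubble-sort the I/Q sample vectors in signed order, negatives first */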
+static void rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *iv, u32 *qv)
+{
+ u32 i, j;
+
+ for (i = 0; i < DACK_SN_8822C - 1; i++) {
+ for (j = 0; j < (DACK_SN_8822C - 1 - i) ; j++) {
+ __rtw8822c_dac_iq_sort(rtwdev, &iv[j], &iv[j + 1]);
+ __rtw8822c_dac_iq_sort(rtwdev, &qv[j], &qv[j + 1]);
+ }
+ }
+}
+
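+/* Average the samples, skipping the first and last 10 entries; a negative
+ * average is returned in 10-bit form as 0x400 - t.
+ */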
+static void rtw8822c_dac_iq_offset(struct rtw_dev *rtwdev, u32 *vec, u32 *val)
+{
+ u32 p, m, t, i;
+
+ m = 0;
+ p = 0;
+ for (i = 10; i < DACK_SN_8822C - 10; i++) {
+ if (vec[i] > 0x200)
+ m = (0x400 - vec[i]) + m;
+ else
+ p = vec[i] + p;
+ }
+
+ if (p > m) {
+ t = p - m;
+ t = t / (DACK_SN_8822C - 20);
+ } else {
+ t = m - p;
+ t = t / (DACK_SN_8822C - 20);
+ if (t != 0x0)
+ t = 0x400 - t;
+ }
+
+ *val = t;
+}
+
+static u32 rtw8822c_get_path_base_addr(u8 path)
+{
+ u32 base_addr;
+
+ switch (path) {
+ case RF_PATH_A:
+ base_addr = 0x1800;
+ break;
+ case RF_PATH_B:
+ base_addr = 0x4100;
+ break;
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+
+ return base_addr;
+}
+
+static bool rtw8822c_dac_iq_check(struct rtw_dev *rtwdev, u32 value)
+{
+ bool ret = true;
+
+ if ((value >= 0x200 && (0x400 - value) > 0x64) ||
+ (value < 0x200 && value > 0x64)) {
+ ret = false;
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] Error overflow\n");
+ }
+
+ return ret;
+}
+
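+/* Collect DACK_SN_8822C I/Q samples from 0x2dbc that pass
+ * rtw8822c_dac_iq_check(), giving up after 10000 reads.
+ */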
+static void rtw8822c_dac_cal_iq_sample(struct rtw_dev *rtwdev, u32 *iv, u32 *qv)
+{
+ u32 temp;
+ int i = 0, cnt = 0;
+
+ while (i < DACK_SN_8822C && cnt < 10000) {
+ cnt++;
+ temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
+ iv[i] = (temp & 0x3ff000) >> 12;
+ qv[i] = temp & 0x3ff;
+
+ if (rtw8822c_dac_iq_check(rtwdev, iv[i]) &&
+ rtw8822c_dac_iq_check(rtwdev, qv[i]))
+ i++;
+ }
+}
+
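+/* Re-sample the two extreme entries until the I and Q spreads are both <= 5
+ * (at most 100 retries), then derive the I/Q DC offsets.
+ */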
+static void rtw8822c_dac_cal_iq_search(struct rtw_dev *rtwdev,
+ u32 *iv, u32 *qv,
+ u32 *i_value, u32 *q_value)
+{
+ u32 i_max = 0, q_max = 0, i_min = 0, q_min = 0;
+ u32 i_delta, q_delta;
+ u32 temp;
+ int i, cnt = 0;
+
+ do {
+ i_min = iv[0];
+ i_max = iv[0];
+ q_min = qv[0];
+ q_max = qv[0];
+ for (i = 0; i < DACK_SN_8822C; i++) {
+ rtw8822c_rf_minmax_cmp(rtwdev, iv[i], &i_min, &i_max);
+ rtw8822c_rf_minmax_cmp(rtwdev, qv[i], &q_min, &q_max);
+ }
+
+ if (i_max < 0x200 && i_min < 0x200)
+ i_delta = i_max - i_min;
+ else if (i_max >= 0x200 && i_min >= 0x200)
+ i_delta = i_max - i_min;
+ else
+ i_delta = i_max + (0x400 - i_min);
+
+ if (q_max < 0x200 && q_min < 0x200)
+ q_delta = q_max - q_min;
+ else if (q_max >= 0x200 && q_min >= 0x200)
+ q_delta = q_max - q_min;
+ else
+ q_delta = q_max + (0x400 - q_min);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] i: min=0x%08x, max=0x%08x, delta=0x%08x\n",
+ i_min, i_max, i_delta);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] q: min=0x%08x, max=0x%08x, delta=0x%08x\n",
+ q_min, q_max, q_delta);
+
+ rtw8822c_dac_iq_sort(rtwdev, iv, qv);
+
+ if (i_delta > 5 || q_delta > 5) {
+ temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
+ iv[0] = (temp & 0x3ff000) >> 12;
+ qv[0] = temp & 0x3ff;
+ temp = rtw_read32_mask(rtwdev, 0x2dbc, 0x3fffff);
+ iv[DACK_SN_8822C - 1] = (temp & 0x3ff000) >> 12;
+ qv[DACK_SN_8822C - 1] = temp & 0x3ff;
+ } else {
+ break;
+ }
+ } while (cnt++ < 100);
+
+ rtw8822c_dac_iq_offset(rtwdev, iv, i_value);
+ rtw8822c_dac_iq_offset(rtwdev, qv, q_value);
+}
+
+static void rtw8822c_dac_cal_rf_mode(struct rtw_dev *rtwdev,
+ u32 *i_value, u32 *q_value)
+{
+ u32 iv[DACK_SN_8822C], qv[DACK_SN_8822C];
+ u32 rf_a, rf_b;
+
+ mdelay(10);
+
+ rf_a = rtw_read_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK);
+ rf_b = rtw_read_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] RF path-A=0x%05x\n", rf_a);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] RF path-B=0x%05x\n", rf_b);
+
+ rtw8822c_dac_cal_iq_sample(rtwdev, iv, qv);
+ rtw8822c_dac_cal_iq_search(rtwdev, iv, qv, i_value, q_value);
+}
+
+static void rtw8822c_dac_bb_setting(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, 0x1d58, 0xff8, 0x1ff);
+ rtw_write32_mask(rtwdev, 0x1a00, 0x3, 0x2);
+ rtw_write32_mask(rtwdev, 0x1a14, 0x300, 0x3);
+ rtw_write32(rtwdev, 0x1d70, 0x7e7e7e7e);
+ rtw_write32_mask(rtwdev, 0x180c, 0x3, 0x0);
+ rtw_write32_mask(rtwdev, 0x410c, 0x3, 0x0);
+ rtw_write32(rtwdev, 0x1b00, 0x00000008);
+ rtw_write8(rtwdev, 0x1bcc, 0x3f);
+ rtw_write32(rtwdev, 0x1b00, 0x0000000a);
+ rtw_write8(rtwdev, 0x1bcc, 0x3f);
+ rtw_write32_mask(rtwdev, 0x1e24, BIT(31), 0x0);
+ rtw_write32_mask(rtwdev, 0x1e28, 0xf, 0x3);
+}
+
+static void rtw8822c_dac_cal_adc(struct rtw_dev *rtwdev,
+ u8 path, u32 *adc_ic, u32 *adc_qc)
+{
+ u32 ic = 0, qc = 0, temp = 0;
+ u32 base_addr;
+ u32 path_sel;
+ int i;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK path(%d)\n", path);
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+ switch (path) {
+ case RF_PATH_A:
+ path_sel = 0xa0000;
+ break;
+ case RF_PATH_B:
+ path_sel = 0x80000;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ /* ADCK step1 */
+ rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x0);
+ if (path == RF_PATH_B)
+ rtw_write32(rtwdev, base_addr + 0x30, 0x30db8041);
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02dd08c4);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0x10000260);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x0, RFREG_MASK, 0x10000);
+ rtw_write_rf(rtwdev, RF_PATH_B, 0x0, RFREG_MASK, 0x10000);
+ for (i = 0; i < 10; i++) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK count=%d\n", i);
+ rtw_write32(rtwdev, 0x1c3c, path_sel + 0x8003);
+ rtw_write32(rtwdev, 0x1c24, 0x00010002);
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] before: i=0x%x, q=0x%x\n", ic, qc);
+
+ /* compensation value */
+ if (ic != 0x0) {
+ ic = 0x400 - ic;
+ *adc_ic = ic;
+ }
+ if (qc != 0x0) {
+ qc = 0x400 - qc;
+ *adc_qc = qc;
+ }
+ temp = (ic & 0x3ff) | ((qc & 0x3ff) << 10);
+ rtw_write32(rtwdev, base_addr + 0x68, temp);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] ADCK 0x%08x=0x08%x\n",
+ base_addr + 0x68, temp);
+ /* check ADC DC offset */
+ rtw_write32(rtwdev, 0x1c3c, path_sel + 0x8103);
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] after: i=0x%08x, q=0x%08x\n", ic, qc);
+ if (ic >= 0x200)
+ ic = 0x400 - ic;
+ if (qc >= 0x200)
+ qc = 0x400 - qc;
+ if (ic < 5 && qc < 5)
+ break;
+ }
+
+ /* ADCK step2 */
+ rtw_write32(rtwdev, 0x1c3c, 0x00000003);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0x10000260);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
+
+ /* release pull low switch on IQ path */
+ rtw_write_rf(rtwdev, path, 0x8f, BIT(13), 0x1);
+}
+
+static void rtw8822c_dac_cal_step1(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 base_addr;
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ if (path == RF_PATH_A) {
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0040ff0);
+ rtw_write32(rtwdev, 0x1c38, 0xffffffff);
+ }
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff81);
+ rtw_write32(rtwdev, base_addr + 0xc0, 0x0003d208);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xd8, 0x0008ff81);
+ rtw_write32(rtwdev, base_addr + 0xdc, 0x0003d208);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x60000000);
+ mdelay(2);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x000aff8d);
+ mdelay(2);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb89);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
+ mdelay(1);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
+ rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0x0008ff87);
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0000000);
+}
+
+static void rtw8822c_dac_cal_step2(struct rtw_dev *rtwdev,
+ u8 path, u32 *ic_out, u32 *qc_out)
+{
+ u32 base_addr;
+ u32 ic, qc, ic_in, qc_in;
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, 0x0);
+ rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, 0x8);
+ rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, 0x0);
+ rtw_write32_mask(rtwdev, base_addr + 0xdc, 0xf, 0x8);
+
+ rtw_write32(rtwdev, 0x1b00, 0x00000008);
+ rtw_write8(rtwdev, 0x1bcc, 0x03f);
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, 0x1c3c, 0x00088103);
+
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic_in, &qc_in);
+ ic = ic_in;
+ qc = qc_in;
+
+ /* compensation value */
+ if (ic != 0x0)
+ ic = 0x400 - ic;
+ if (qc != 0x0)
+ qc = 0x400 - qc;
+ if (ic < 0x300) {
+ ic = ic * 2 * 6 / 5;
+ ic = ic + 0x80;
+ } else {
+ ic = (0x400 - ic) * 2 * 6 / 5;
+ ic = 0x7f - ic;
+ }
+ if (qc < 0x300) {
+ qc = qc * 2 * 6 / 5;
+ qc = qc + 0x80;
+ } else {
+ qc = (0x400 - qc) * 2 * 6 / 5;
+ qc = 0x7f - qc;
+ }
+
+ *ic_out = ic;
+ *qc_out = qc;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] before i=0x%x, q=0x%x\n", ic_in, qc_in);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] after i=0x%x, q=0x%x\n", ic, qc);
+}
+
+static void rtw8822c_dac_cal_step3(struct rtw_dev *rtwdev, u8 path,
+ u32 adc_ic, u32 adc_qc,
+ u32 *ic_in, u32 *qc_in,
+ u32 *i_out, u32 *q_out)
+{
+ u32 base_addr;
+ u32 ic, qc;
+ u32 temp;
+
+ base_addr = rtw8822c_get_path_base_addr(path);
+ ic = *ic_in;
+ qc = *qc_in;
+
+ rtw_write32(rtwdev, base_addr + 0x0c, 0xdff00220);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, 0x9b4, 0xdb66db00);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xbc, 0xc008ff81);
+ rtw_write32(rtwdev, base_addr + 0xc0, 0x0003d208);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, ic & 0xf);
+ rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, (ic & 0xf0) >> 4);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb88);
+ rtw_write32(rtwdev, base_addr + 0xd8, 0xe008ff81);
+ rtw_write32(rtwdev, base_addr + 0xdc, 0x0003d208);
+ rtw_write32_mask(rtwdev, base_addr + 0xd8, 0xf0000000, qc & 0xf);
+ rtw_write32_mask(rtwdev, base_addr + 0xdc, 0xf, (qc & 0xf0) >> 4);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x60000000);
+ mdelay(2);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x6);
+ mdelay(2);
+ rtw_write32(rtwdev, base_addr + 0xb0, 0x0a11fb89);
+ rtw_write32(rtwdev, base_addr + 0xcc, 0x0a11fb89);
+ mdelay(1);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xd4, 0x62000000);
+ mdelay(20);
+ rtw_write32(rtwdev, base_addr + 0xb8, 0x02000000);
+ mdelay(20);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xe, 0x3);
+ rtw_write32(rtwdev, 0x9b4, 0xdb6db600);
+
+ /* check DAC DC offset */
+ temp = ((adc_ic + 0x10) & 0x3ff) | (((adc_qc + 0x10) & 0x3ff) << 10);
+ rtw_write32(rtwdev, base_addr + 0x68, temp);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c5);
+ rtw_write32(rtwdev, base_addr + 0x60, 0xf0000000);
+ rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc);
+ if (ic >= 0x10)
+ ic = ic - 0x10;
+ else
+ ic = 0x400 - (0x10 - ic);
+
+ if (qc >= 0x10)
+ qc = qc - 0x10;
+ else
+ qc = 0x400 - (0x10 - qc);
+
+ *i_out = ic;
+ *q_out = qc;
+
+ if (ic >= 0x200)
+ ic = 0x400 - ic;
+ if (qc >= 0x200)
+ qc = 0x400 - qc;
+
+ *ic_in = ic;
+ *qc_in = qc;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[DACK] after DACK i=0x%x, q=0x%x\n", *i_out, *q_out);
+}
+
+static void rtw8822c_dac_cal_step4(struct rtw_dev *rtwdev, u8 path)
+{
+ u32 base_addr = rtw8822c_get_path_base_addr(path);
+
+ rtw_write32(rtwdev, base_addr + 0x68, 0x0);
+ rtw_write32(rtwdev, base_addr + 0x10, 0x02d508c4);
+ rtw_write32_mask(rtwdev, base_addr + 0xbc, 0x1, 0x0);
+ rtw_write32_mask(rtwdev, base_addr + 0x30, BIT(30), 0x1);
+}
+
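+/* DAC calibration: back up the affected BB/RF registers, prepare the BB,
+ * then for each RF path run the ADC compensation followed by up to 10
+ * iterations of calibration steps 1-3 and a closing step 4, and finally
+ * restore the saved registers.
+ */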
+static void rtw8822c_rf_dac_cal(struct rtw_dev *rtwdev)
+{
+ struct rtw_backup_info backup_rf[DACK_RF_8822C * DACK_PATH_8822C];
+ struct rtw_backup_info backup[DACK_REG_8822C];
+ u32 ic = 0, qc = 0, i;
+ u32 i_a = 0x0, q_a = 0x0, i_b = 0x0, q_b = 0x0;
+ u32 ic_a = 0x0, qc_a = 0x0, ic_b = 0x0, qc_b = 0x0;
+ u32 adc_ic_a = 0x0, adc_qc_a = 0x0, adc_ic_b = 0x0, adc_qc_b = 0x0;
+
+ rtw8822c_dac_backup_reg(rtwdev, backup, backup_rf);
+
+ rtw8822c_dac_bb_setting(rtwdev);
+
+ /* path-A */
+ rtw8822c_dac_cal_adc(rtwdev, RF_PATH_A, &adc_ic_a, &adc_qc_a);
+ for (i = 0; i < 10; i++) {
+ rtw8822c_dac_cal_step1(rtwdev, RF_PATH_A);
+ rtw8822c_dac_cal_step2(rtwdev, RF_PATH_A, &ic, &qc);
+ ic_a = ic;
+ qc_a = qc;
+
+ rtw8822c_dac_cal_step3(rtwdev, RF_PATH_A, adc_ic_a, adc_qc_a,
+ &ic, &qc, &i_a, &q_a);
+
+ if (ic < 5 && qc < 5)
+ break;
+ }
+ rtw8822c_dac_cal_step4(rtwdev, RF_PATH_A);
+
+ /* path-B */
+ rtw8822c_dac_cal_adc(rtwdev, RF_PATH_B, &adc_ic_b, &adc_qc_b);
+ for (i = 0; i < 10; i++) {
+ rtw8822c_dac_cal_step1(rtwdev, RF_PATH_B);
+ rtw8822c_dac_cal_step2(rtwdev, RF_PATH_B, &ic, &qc);
+ ic_b = ic;
+ qc_b = qc;
+
+ rtw8822c_dac_cal_step3(rtwdev, RF_PATH_B, adc_ic_b, adc_qc_b,
+ &ic, &qc, &i_b, &q_b);
+
+ if (ic < 5 && qc < 5)
+ break;
+ }
+ rtw8822c_dac_cal_step4(rtwdev, RF_PATH_B);
+
+ rtw_write32(rtwdev, 0x1b00, 0x00000008);
+ rtw_write32_mask(rtwdev, 0x4130, BIT(30), 0x1);
+ rtw_write8(rtwdev, 0x1bcc, 0x0);
+ rtw_write32(rtwdev, 0x1b00, 0x0000000a);
+ rtw_write8(rtwdev, 0x1bcc, 0x0);
+
+ rtw8822c_dac_restore_reg(rtwdev, backup, backup_rf);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: ic=0x%x, qc=0x%x\n", ic_a, qc_a);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: ic=0x%x, qc=0x%x\n", ic_b, qc_b);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path A: i=0x%x, q=0x%x\n", i_a, q_a);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] path B: i=0x%x, q=0x%x\n", i_b, q_b);
+}
+
+static void rtw8822c_rf_x2_check(struct rtw_dev *rtwdev)
+{
+ u8 x2k_busy;
+
+ mdelay(1);
+ x2k_busy = rtw_read_rf(rtwdev, RF_PATH_A, 0xb8, BIT(15));
+ if (x2k_busy == 1) {
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xb8, RFREG_MASK, 0xC4440);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xba, RFREG_MASK, 0x6840D);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0xb8, RFREG_MASK, 0x80440);
+ mdelay(1);
+ }
+}
+
+static void rtw8822c_rf_init(struct rtw_dev *rtwdev)
+{
+ rtw8822c_rf_dac_cal(rtwdev);
+ rtw8822c_rf_x2_check(rtwdev);
+}
+
+static void rtw8822c_phy_set_param(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 crystal_cap;
+ u8 cck_gi_u_bnd_msb = 0;
+ u8 cck_gi_u_bnd_lsb = 0;
+ u8 cck_gi_l_bnd_msb = 0;
+ u8 cck_gi_l_bnd_lsb = 0;
+ bool is_tx2_path;
+
+ /* power on BB/RF domain */
+ rtw_write8_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write32_set(rtwdev, REG_WLRF1, BIT_WLRF1_BBRF_EN);
+
+ /* pre init before header files config */
+ rtw8822c_header_file_init(rtwdev, true);
+
+ rtw_phy_load_tables(rtwdev);
+
+ crystal_cap = rtwdev->efuse.crystal_cap & 0x7f;
+ rtw_write32_mask(rtwdev, REG_ANAPAR_XTAL_0, 0xfffc00,
+ crystal_cap | (crystal_cap << 7));
+
+ /* post init after header files config */
+ rtw8822c_header_file_init(rtwdev, false);
+
+ is_tx2_path = false;
+ rtw8822c_config_trx_mode(rtwdev, hal->antenna_tx, hal->antenna_rx,
+ is_tx2_path);
+ rtw_phy_init(rtwdev);
+
+ cck_gi_u_bnd_msb = (u8)rtw_read32_mask(rtwdev, 0x1a98, 0xc000);
+ cck_gi_u_bnd_lsb = (u8)rtw_read32_mask(rtwdev, 0x1aa8, 0xf0000);
+ cck_gi_l_bnd_msb = (u8)rtw_read32_mask(rtwdev, 0x1a98, 0xc0);
+ cck_gi_l_bnd_lsb = (u8)rtw_read32_mask(rtwdev, 0x1a70, 0x0f000000);
+
+ dm_info->cck_gi_u_bnd = ((cck_gi_u_bnd_msb << 4) | (cck_gi_u_bnd_lsb));
+ dm_info->cck_gi_l_bnd = ((cck_gi_l_bnd_msb << 4) | (cck_gi_l_bnd_lsb));
+
+ rtw8822c_rf_init(rtwdev);
+ /* wifi path controller */
+ rtw_write32_mask(rtwdev, 0x70, 0xff000000, 0x0e);
+ rtw_write32_mask(rtwdev, 0x1704, 0xffffffff, 0x7700);
+ rtw_write32_mask(rtwdev, 0x1700, 0xffffffff, 0xc00f0038);
+ rtw_write32_mask(rtwdev, 0x6c0, 0xffffffff, 0xaaaaaaaa);
+ rtw_write32_mask(rtwdev, 0x6c4, 0xffffffff, 0xaaaaaaaa);
+}
+
+#define WLAN_TXQ_RPT_EN 0x1F
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_PIFS_TIME 0x1C
+#define WLAN_SIFS_CCK_CONT_TX 0x0A
+#define WLAN_SIFS_OFDM_CONT_TX 0x0E
+#define WLAN_SIFS_CCK_TRX 0x0A
+#define WLAN_SIFS_OFDM_TRX 0x10
+#define WLAN_NAV_MAX 0xC8
+#define WLAN_RDG_NAV 0x05
+#define WLAN_TXOP_NAV 0x1B
+#define WLAN_CCK_RX_TSF 0x30
+#define WLAN_OFDM_RX_TSF 0x30
+#define WLAN_TBTT_PROHIBIT 0x04 /* unit : 32us */
+#define WLAN_TBTT_HOLD_TIME 0x064 /* unit : 32us */
+#define WLAN_DRV_EARLY_INT 0x04
+#define WLAN_BCN_CTRL_CLT0 0x10
+#define WLAN_BCN_DMA_TIME 0x02
+#define WLAN_BCN_MAX_ERR 0xFF
+#define WLAN_SIFS_CCK_DUR_TUNE 0x0A
+#define WLAN_SIFS_OFDM_DUR_TUNE 0x10
+#define WLAN_SIFS_CCK_CTX 0x0A
+#define WLAN_SIFS_CCK_IRX 0x0A
+#define WLAN_SIFS_OFDM_CTX 0x0E
+#define WLAN_SIFS_OFDM_IRX 0x0E
+#define WLAN_EIFS_DUR_TUNE 0x40
+#define WLAN_EDCA_VO_PARAM 0x002FA226
+#define WLAN_EDCA_VI_PARAM 0x005EA328
+#define WLAN_EDCA_BE_PARAM 0x005EA42B
+#define WLAN_EDCA_BK_PARAM 0x0000A44F
+
+#define WLAN_RX_FILTER0 0xFFFFFFFF
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0xE400220E
+#define WLAN_RXPKT_MAX_SZ 12288
+#define WLAN_RXPKT_MAX_SZ_512 (WLAN_RXPKT_MAX_SZ >> 9)
+
+#define WLAN_AMPDU_MAX_TIME 0x70
+#define WLAN_RTS_LEN_TH 0xFF
+#define WLAN_RTS_TX_TIME_TH 0x08
+#define WLAN_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_RTS_MAX_AGG_PKT_LIMIT 0x20
+#define WLAN_PRE_TXCNT_TIME_TH 0x1E0
+#define FAST_EDCA_VO_TH 0x06
+#define FAST_EDCA_VI_TH 0x06
+#define FAST_EDCA_BE_TH 0x06
+#define FAST_EDCA_BK_TH 0x06
+#define WLAN_BAR_RETRY_LIMIT 0x01
+#define WLAN_BAR_ACK_TYPE 0x05
+#define WLAN_RA_TRY_RATE_AGG_LIMIT 0x08
+#define WLAN_RESP_TXRATE 0x84
+#define WLAN_ACK_TO 0x21
+#define WLAN_ACK_TO_CCK 0x6A
+#define WLAN_DATA_RATE_FB_CNT_1_4 0x01000000
+#define WLAN_DATA_RATE_FB_CNT_5_8 0x08070504
+#define WLAN_RTS_RATE_FB_CNT_5_8 0x08070504
+#define WLAN_DATA_RATE_FB_RATE0 0xFE01F010
+#define WLAN_DATA_RATE_FB_RATE0_H 0x40000000
+#define WLAN_RTS_RATE_FB_RATE1 0x003FF010
+#define WLAN_RTS_RATE_FB_RATE1_H 0x40000000
+#define WLAN_RTS_RATE_FB_RATE4 0x0600F010
+#define WLAN_RTS_RATE_FB_RATE4_H 0x400003E0
+#define WLAN_RTS_RATE_FB_RATE5 0x0600F015
+#define WLAN_RTS_RATE_FB_RATE5_H 0x000000E0
+
+#define WLAN_TX_FUNC_CFG1 0x30
+#define WLAN_TX_FUNC_CFG2 0x30
+#define WLAN_MAC_OPT_NORM_FUNC1 0x98
+#define WLAN_MAC_OPT_LB_FUNC1 0x80
+#define WLAN_MAC_OPT_FUNC2 0x30810041
+
+#define WLAN_SIFS_CFG (WLAN_SIFS_CCK_CONT_TX | \
+ (WLAN_SIFS_OFDM_CONT_TX << BIT_SHIFT_SIFS_OFDM_CTX) | \
+ (WLAN_SIFS_CCK_TRX << BIT_SHIFT_SIFS_CCK_TRX) | \
+ (WLAN_SIFS_OFDM_TRX << BIT_SHIFT_SIFS_OFDM_TRX))
+
+#define WLAN_SIFS_DUR_TUNE (WLAN_SIFS_CCK_DUR_TUNE | \
+ (WLAN_SIFS_OFDM_DUR_TUNE << 8))
+
+#define WLAN_TBTT_TIME (WLAN_TBTT_PROHIBIT |\
+ (WLAN_TBTT_HOLD_TIME << BIT_SHIFT_TBTT_HOLD_TIME_AP))
+
+#define WLAN_NAV_CFG (WLAN_RDG_NAV | (WLAN_TXOP_NAV << 16))
+#define WLAN_RX_TSF_CFG (WLAN_CCK_RX_TSF | (WLAN_OFDM_RX_TSF) << 8)
+
+#define MAC_CLK_SPEED 80 /* 80M */
+#define EFUSE_PCB_INFO_OFFSET 0xCA
+
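+/* One-time MAC setup: TX queue/report control, SIFS/EDCA timing, rate
+ * fallback, protocol thresholds, beacon function, WMAC RX filters and
+ * low-power RX settings.
+ */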
+static int rtw8822c_mac_init(struct rtw_dev *rtwdev)
+{
+ u8 value8;
+ u16 value16;
+ u32 value32;
+ u16 pre_txcnt;
+
+ /* txq control */
+ value8 = rtw_read8(rtwdev, REG_FWHW_TXQ_CTRL);
+ value8 |= (BIT(7) & ~BIT(1) & ~BIT(2));
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL, value8);
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ /* sifs control */
+ rtw_write16(rtwdev, REG_SPEC_SIFS, WLAN_SIFS_DUR_TUNE);
+ rtw_write32(rtwdev, REG_SIFS, WLAN_SIFS_CFG);
+ rtw_write16(rtwdev, REG_RESP_SIFS_CCK,
+ WLAN_SIFS_CCK_CTX | WLAN_SIFS_CCK_IRX << 8);
+ rtw_write16(rtwdev, REG_RESP_SIFS_OFDM,
+ WLAN_SIFS_OFDM_CTX | WLAN_SIFS_OFDM_IRX << 8);
+ /* rate fallback control */
+ rtw_write32(rtwdev, REG_DARFRC, WLAN_DATA_RATE_FB_CNT_1_4);
+ rtw_write32(rtwdev, REG_DARFRCH, WLAN_DATA_RATE_FB_CNT_5_8);
+ rtw_write32(rtwdev, REG_RARFRCH, WLAN_RTS_RATE_FB_CNT_5_8);
+ rtw_write32(rtwdev, REG_ARFR0, WLAN_DATA_RATE_FB_RATE0);
+ rtw_write32(rtwdev, REG_ARFRH0, WLAN_DATA_RATE_FB_RATE0_H);
+ rtw_write32(rtwdev, REG_ARFR1_V1, WLAN_RTS_RATE_FB_RATE1);
+ rtw_write32(rtwdev, REG_ARFRH1_V1, WLAN_RTS_RATE_FB_RATE1_H);
+ rtw_write32(rtwdev, REG_ARFR4, WLAN_RTS_RATE_FB_RATE4);
+ rtw_write32(rtwdev, REG_ARFRH4, WLAN_RTS_RATE_FB_RATE4_H);
+ rtw_write32(rtwdev, REG_ARFR5, WLAN_RTS_RATE_FB_RATE5);
+ rtw_write32(rtwdev, REG_ARFRH5, WLAN_RTS_RATE_FB_RATE5_H);
+ /* protocol configuration */
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME_V1, WLAN_AMPDU_MAX_TIME);
+ rtw_write8_set(rtwdev, REG_TX_HANG_CTRL, BIT_EN_EOF_V1);
+ pre_txcnt = WLAN_PRE_TXCNT_TIME_TH | BIT_EN_PRECNT;
+ rtw_write8(rtwdev, REG_PRECNT_CTRL, (u8)(pre_txcnt & 0xFF));
+ rtw_write8(rtwdev, REG_PRECNT_CTRL + 1, (u8)(pre_txcnt >> 8));
+ value32 = WLAN_RTS_LEN_TH | (WLAN_RTS_TX_TIME_TH << 8) |
+ (WLAN_MAX_AGG_PKT_LIMIT << 16) |
+ (WLAN_RTS_MAX_AGG_PKT_LIMIT << 24);
+ rtw_write32(rtwdev, REG_PROT_MODE_CTRL, value32);
+ rtw_write16(rtwdev, REG_BAR_MODE_CTRL + 2,
+ WLAN_BAR_RETRY_LIMIT | WLAN_RA_TRY_RATE_AGG_LIMIT << 8);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING, FAST_EDCA_VO_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_VOVI_SETTING + 2, FAST_EDCA_VI_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING, FAST_EDCA_BE_TH);
+ rtw_write8(rtwdev, REG_FAST_EDCA_BEBK_SETTING + 2, FAST_EDCA_BK_TH);
+ /* close BA parser */
+ rtw_write8_clr(rtwdev, REG_LIFETIME_EN, BIT_BA_PARSER_EN);
+ rtw_write32_clr(rtwdev, REG_RRSR, BITS_RRSR_RSC);
+
+ /* EDCA configuration */
+ rtw_write32(rtwdev, REG_EDCA_VO_PARAM, WLAN_EDCA_VO_PARAM);
+ rtw_write32(rtwdev, REG_EDCA_VI_PARAM, WLAN_EDCA_VI_PARAM);
+ rtw_write32(rtwdev, REG_EDCA_BE_PARAM, WLAN_EDCA_BE_PARAM);
+ rtw_write32(rtwdev, REG_EDCA_BK_PARAM, WLAN_EDCA_BK_PARAM);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_TIME);
+ rtw_write8_clr(rtwdev, REG_TX_PTCL_CTRL + 1, BIT_SIFS_BK_EN >> 8);
+ rtw_write8_set(rtwdev, REG_RD_CTRL + 1,
+ (BIT_DIS_TXOP_CFE | BIT_DIS_LSIG_CFE |
+ BIT_DIS_STBC_CFE) >> 8);
+
+ /* MAC clock configuration */
+ rtw_write32_clr(rtwdev, REG_AFE_CTRL1, BIT_MAC_CLK_SEL);
+ rtw_write8(rtwdev, REG_USTIME_TSF, MAC_CLK_SPEED);
+ rtw_write8(rtwdev, REG_USTIME_EDCA, MAC_CLK_SPEED);
+
+ rtw_write8_set(rtwdev, REG_MISC_CTRL,
+ BIT_EN_FREE_CNT | BIT_DIS_SECOND_CCA);
+ rtw_write8_clr(rtwdev, REG_TIMER0_SRC_SEL, BIT_TSFT_SEL_TIMER0);
+ rtw_write16(rtwdev, REG_TXPAUSE, 0x0000);
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write32(rtwdev, REG_RD_NAV_NXT, WLAN_NAV_CFG);
+ rtw_write16(rtwdev, REG_RXTSF_OFFSET_CCK, WLAN_RX_TSF_CFG);
+ /* Set beacon control - enable TSF and other related functions */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+ /* Set beacon transmission related registers */
+ rtw_write32(rtwdev, REG_TBTT_PROHIBIT, WLAN_TBTT_TIME);
+ rtw_write8(rtwdev, REG_DRVERLYINT, WLAN_DRV_EARLY_INT);
+ rtw_write8(rtwdev, REG_BCN_CTRL_CLINT0, WLAN_BCN_CTRL_CLT0);
+ rtw_write8(rtwdev, REG_BCNDMATIM, WLAN_BCN_DMA_TIME);
+ rtw_write8(rtwdev, REG_BCN_MAX_ERR, WLAN_BCN_MAX_ERR);
+
+ /* WMAC configuration */
+ rtw_write8(rtwdev, REG_BBPSF_CTRL + 2, WLAN_RESP_TXRATE);
+ rtw_write8(rtwdev, REG_ACKTO, WLAN_ACK_TO);
+ rtw_write8(rtwdev, REG_ACKTO_CCK, WLAN_ACK_TO_CCK);
+ rtw_write16(rtwdev, REG_EIFS, WLAN_EIFS_DUR_TUNE);
+ rtw_write8(rtwdev, REG_NAV_CTRL + 2, WLAN_NAV_MAX);
+ rtw_write8(rtwdev, REG_WMAC_TRXPTCL_CTL_H + 2, WLAN_BAR_ACK_TYPE);
+ rtw_write32(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RXPKT_MAX_SZ_512);
+ rtw_write8(rtwdev, REG_TCR + 2, WLAN_TX_FUNC_CFG2);
+ rtw_write8(rtwdev, REG_TCR + 1, WLAN_TX_FUNC_CFG1);
+ rtw_write32_set(rtwdev, REG_GENERAL_OPTION, BIT_DUMMY_FCS_READY_MASK_EN);
+ rtw_write32(rtwdev, REG_WMAC_OPTION_FUNCTION + 8, WLAN_MAC_OPT_FUNC2);
+ rtw_write8(rtwdev, REG_WMAC_OPTION_FUNCTION_1, WLAN_MAC_OPT_NORM_FUNC1);
+
+ /* init low power */
+ value16 = rtw_read16(rtwdev, REG_RXPSF_CTRL + 2) & 0xF00F;
+ value16 |= (BIT_RXGCK_VHT_FIFOTHR(1) | BIT_RXGCK_HT_FIFOTHR(1) |
+ BIT_RXGCK_OFDM_FIFOTHR(1) | BIT_RXGCK_CCK_FIFOTHR(1)) >> 16;
+ rtw_write16(rtwdev, REG_RXPSF_CTRL + 2, value16);
+ value16 = 0;
+ value16 = BIT_SET_RXPSF_PKTLENTHR(value16, 1);
+ value16 |= BIT_RXPSF_CTRLEN | BIT_RXPSF_VHTCHKEN | BIT_RXPSF_HTCHKEN
+ | BIT_RXPSF_OFDMCHKEN | BIT_RXPSF_CCKCHKEN
+ | BIT_RXPSF_OFDMRST;
+ rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
+ rtw_write32(rtwdev, REG_RXPSF_TYPE_CTRL, 0xFFFFFFFF);
+ /* rx ignore configuration */
+ value16 = rtw_read16(rtwdev, REG_RXPSF_CTRL);
+ value16 &= ~(BIT_RXPSF_MHCHKEN | BIT_RXPSF_CCKRST |
+ BIT_RXPSF_CONT_ERRCHKEN);
+ value16 = BIT_SET_RXPSF_ERRTHR(value16, 0x07);
+ rtw_write16(rtwdev, REG_RXPSF_CTRL, value16);
+
+ return 0;
+}
+
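+/* Program RF register 0x18 (band, channel, RFSI and bandwidth bits) on both
+ * paths, and write the bandwidth-dependent rxbb value through the RF LUT.
+ */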
+static void rtw8822c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+#define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8))
+#define RF18_BAND_2G (0)
+#define RF18_BAND_5G (BIT(16) | BIT(8))
+#define RF18_CHANNEL_MASK (MASKBYTE0)
+#define RF18_RFSI_MASK (BIT(18) | BIT(17))
+#define RF18_RFSI_GE_CH80 (BIT(17))
+#define RF18_RFSI_GT_CH140 (BIT(18))
+#define RF18_BW_MASK (BIT(13) | BIT(12))
+#define RF18_BW_20M (BIT(13) | BIT(12))
+#define RF18_BW_40M (BIT(13))
+#define RF18_BW_80M (BIT(12))
+
+ u32 rf_reg18 = 0;
+ u32 rf_rxbb = 0;
+
+ rf_reg18 = rtw_read_rf(rtwdev, RF_PATH_A, 0x18, RFREG_MASK);
+
+ rf_reg18 &= ~(RF18_BAND_MASK | RF18_CHANNEL_MASK | RF18_RFSI_MASK |
+ RF18_BW_MASK);
+
+ rf_reg18 |= (channel <= 14 ? RF18_BAND_2G : RF18_BAND_5G);
+ rf_reg18 |= (channel & RF18_CHANNEL_MASK);
+ if (channel > 144)
+ rf_reg18 |= RF18_RFSI_GT_CH140;
+ else if (channel >= 80)
+ rf_reg18 |= RF18_RFSI_GE_CH80;
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_5:
+ case RTW_CHANNEL_WIDTH_10:
+ case RTW_CHANNEL_WIDTH_20:
+ default:
+ rf_reg18 |= RF18_BW_20M;
+ rf_rxbb = 0x18;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ /* RF bandwidth */
+ rf_reg18 |= RF18_BW_40M;
+ rf_rxbb = 0x10;
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rf_reg18 |= RF18_BW_80M;
+ rf_rxbb = 0x8;
+ break;
+ }
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWA, 0x1f, 0x12);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWD0, 0xfffff, rf_rxbb);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE2, 0x04, 0x00);
+
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE2, 0x04, 0x01);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWA, 0x1f, 0x12);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWD0, 0xfffff, rf_rxbb);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_LUTWE2, 0x04, 0x00);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_reg18);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_reg18);
+}
+
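+/* Drop the initial gain index on both RX paths by 2, then restore it */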
+static void rtw8822c_toggle_igi(struct rtw_dev *rtwdev)
+{
+ u32 igi;
+
+ igi = rtw_read32_mask(rtwdev, REG_RXIGI, 0x7f);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f00, igi - 2);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f, igi);
+ rtw_write32_mask(rtwdev, REG_RXIGI, 0x7f00, igi);
+}
+
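+/* Baseband setup per band and bandwidth: CCK RX path enable/bypass, per
+ * sub-band RX AGC value, SCO tracking, CCK TX filter coefficients (channel 14
+ * uses its own set) and the bandwidth-dependent TX filter/clock settings.
+ */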
+static void rtw8822c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ if (channel <= 14) {
+ rtw_write32_clr(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
+ rtw_write32_set(rtwdev, REG_TXF4, BIT(20));
+ rtw_write32_clr(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
+ rtw_write32_clr(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
+ rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0xF);
+
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x0);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x0);
+ if (channel == 13 || channel == 14)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x969);
+ else if (channel == 11 || channel == 12)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x96a);
+ else
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x9aa);
+ if (channel == 14) {
+ rtw_write32_mask(rtwdev, REG_TXF0, MASKHWORD, 0x3da0);
+ rtw_write32_mask(rtwdev, REG_TXF1, MASKDWORD,
+ 0x4962c931);
+ rtw_write32_mask(rtwdev, REG_TXF2, MASKLWORD, 0x6aa3);
+ rtw_write32_mask(rtwdev, REG_TXF3, MASKHWORD, 0xaa7b);
+ rtw_write32_mask(rtwdev, REG_TXF4, MASKLWORD, 0xf3d7);
+ rtw_write32_mask(rtwdev, REG_TXF5, MASKDWORD, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXF6, MASKDWORD,
+ 0xff012455);
+ rtw_write32_mask(rtwdev, REG_TXF7, MASKDWORD, 0xffff);
+ } else {
+ rtw_write32_mask(rtwdev, REG_TXF0, MASKHWORD, 0x5284);
+ rtw_write32_mask(rtwdev, REG_TXF1, MASKDWORD,
+ 0x3e18fec8);
+ rtw_write32_mask(rtwdev, REG_TXF2, MASKLWORD, 0x0a88);
+ rtw_write32_mask(rtwdev, REG_TXF3, MASKHWORD, 0xacc4);
+ rtw_write32_mask(rtwdev, REG_TXF4, MASKLWORD, 0xc8b2);
+ rtw_write32_mask(rtwdev, REG_TXF5, MASKDWORD,
+ 0x00faf0de);
+ rtw_write32_mask(rtwdev, REG_TXF6, MASKDWORD,
+ 0x00122344);
+ rtw_write32_mask(rtwdev, REG_TXF7, MASKDWORD,
+ 0x0fffffff);
+ }
+ if (channel == 13)
+ rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
+ else
+ rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x1);
+ } else if (channel > 35) {
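+		/* 5 GHz band */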
+ rtw_write32_set(rtwdev, REG_CCKTXONLY, BIT_BB_CCK_CHECK_EN);
+ rtw_write32_set(rtwdev, REG_CCK_CHECK, BIT_CHECK_CCK_EN);
+ rtw_write32_set(rtwdev, REG_BGCTRL, BITS_RX_IQ_WEIGHT);
+ rtw_write32_clr(rtwdev, REG_TXF4, BIT(20));
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCAMSK, 0x3F000000, 0x22);
+ rtw_write32_mask(rtwdev, REG_TXDFIR0, 0x70, 0x3);
+ if (channel >= 36 && channel <= 64) {
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x1);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x1);
+ } else if (channel >= 100 && channel <= 144) {
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x2);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x2);
+ } else if (channel >= 149) {
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL0, 0x1f0, 0x3);
+ rtw_write32_mask(rtwdev, REG_RXAGCCTL, 0x1f0, 0x3);
+ }
+
+ if (channel >= 36 && channel <= 51)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x494);
+ else if (channel >= 52 && channel <= 55)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x493);
+ else if (channel >= 56 && channel <= 111)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x453);
+ else if (channel >= 112 && channel <= 119)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x452);
+ else if (channel >= 120 && channel <= 172)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x412);
+ else if (channel >= 173 && channel <= 177)
+ rtw_write32_mask(rtwdev, REG_SCOTRK, 0xfff, 0x411);
+ }
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x19B);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x7);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x6);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_CCKSB, BIT(4),
+ (primary_ch_idx == 1 ? 1 : 0));
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x5);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
+ (primary_ch_idx | (primary_ch_idx << 4)));
+ break;
+ case RTW_CHANNEL_WIDTH_80:
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0xa);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xc0, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xff00,
+ (primary_ch_idx | (primary_ch_idx << 4)));
+ break;
+ case RTW_CHANNEL_WIDTH_5:
+ rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x1);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x4);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x4);
+ break;
+ case RTW_CHANNEL_WIDTH_10:
+ rtw_write32_mask(rtwdev, REG_DFIRBW, 0x3FF0, 0x2AB);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xf, 0x0);
+ rtw_write32_mask(rtwdev, REG_TXBWCTL, 0xffc0, 0x2);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700, 0x6);
+ rtw_write32_mask(rtwdev, REG_TXCLK, 0x700000, 0x5);
+ break;
+ }
+}
+
+static void rtw8822c_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_chan_idx)
+{
+ rtw8822c_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8822c_set_channel_rf(rtwdev, channel, bw);
+ rtw8822c_toggle_igi(rtwdev);
+}
+
+static void rtw8822c_config_cck_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
+{
+ if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00060000, 0x0);
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00600000, 0x0);
+ } else if (rx_path == BB_PATH_AB) {
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00600000, 0x1);
+ rtw_write32_mask(rtwdev, REG_CCANRX, 0x00060000, 0x1);
+ }
+
+ if (rx_path == BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x0);
+ else if (rx_path == BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x5);
+ else if (rx_path == BB_PATH_AB)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0x0f000000, 0x1);
+}
+
+static void rtw8822c_config_ofdm_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
+{
+ if (rx_path == BB_PATH_A || rx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x300, 0x0);
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x600000, 0x0);
+ rtw_write32_mask(rtwdev, REG_AGCSWSH, BIT(17), 0x0);
+ rtw_write32_mask(rtwdev, REG_ANTWTPD, BIT(20), 0x0);
+ rtw_write32_mask(rtwdev, REG_MRCM, BIT(24), 0x0);
+ } else if (rx_path == BB_PATH_AB) {
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x300, 0x1);
+ rtw_write32_mask(rtwdev, REG_RXFNCTL, 0x600000, 0x1);
+ rtw_write32_mask(rtwdev, REG_AGCSWSH, BIT(17), 0x1);
+ rtw_write32_mask(rtwdev, REG_ANTWTPD, BIT(20), 0x1);
+ rtw_write32_mask(rtwdev, REG_MRCM, BIT(24), 0x1);
+ }
+
+	rtw_write32_mask(rtwdev, REG_ANTMAP, 0x0f000000, rx_path);
+	rtw_write32_mask(rtwdev, REG_ANTMAP, 0x000f0000, rx_path);
+}
+
+static void rtw8822c_config_rx_path(struct rtw_dev *rtwdev, u8 rx_path)
+{
+ rtw8822c_config_cck_rx_path(rtwdev, rx_path);
+ rtw8822c_config_ofdm_rx_path(rtwdev, rx_path);
+}
+
+static void rtw8822c_config_cck_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ bool is_tx2_path)
+{
+ if (tx_path == BB_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
+ } else if (tx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x4);
+ } else {
+ if (is_tx2_path)
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0xc);
+ else
+ rtw_write32_mask(rtwdev, REG_RXCCKSEL, 0xf0000000, 0x8);
+ }
+}
+
+static void rtw8822c_config_ofdm_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ bool is_tx2_path)
+{
+ if (tx_path == BB_PATH_A) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x11);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
+ } else if (tx_path == BB_PATH_B) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x12);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xff, 0x0);
+ } else {
+ if (is_tx2_path) {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x33);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0404);
+ } else {
+ rtw_write32_mask(rtwdev, REG_ANTMAP0, 0xff, 0x31);
+ rtw_write32_mask(rtwdev, REG_TXLGMAP, 0xffff, 0x0400);
+ }
+ }
+}
+
+static void rtw8822c_config_tx_path(struct rtw_dev *rtwdev, u8 tx_path,
+ bool is_tx2_path)
+{
+ rtw8822c_config_cck_tx_path(rtwdev, tx_path, is_tx2_path);
+ rtw8822c_config_ofdm_tx_path(rtwdev, tx_path, is_tx2_path);
+}
+
+static void rtw8822c_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
+ u8 rx_path, bool is_tx2_path)
+{
+ if ((tx_path | rx_path) & BB_PATH_A)
+ rtw_write32_mask(rtwdev, REG_ORITXCODE, MASK20BITS, 0x33312);
+ else
+ rtw_write32_mask(rtwdev, REG_ORITXCODE, MASK20BITS, 0x11111);
+ if ((tx_path | rx_path) & BB_PATH_B)
+ rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x33312);
+ else
+ rtw_write32_mask(rtwdev, REG_ORITXCODE2, MASK20BITS, 0x11111);
+
+ rtw8822c_config_rx_path(rtwdev, rx_path);
+ rtw8822c_config_tx_path(rtwdev, tx_path, is_tx2_path);
+
+ rtw8822c_toggle_igi(rtwdev);
+}
+
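+/* PHY status page 0: correct the reported power with the CCK gain
+ * boundaries before deriving the RSSI and signal power.
+ */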
+static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 l_bnd, u_bnd;
+ u8 gain_a, gain_b;
+ s8 rx_power[RTW_RF_PATH_MAX];
+ s8 min_rx_power = -120;
+
+ rx_power[RF_PATH_A] = GET_PHY_STAT_P0_PWDB_A(phy_status);
+ rx_power[RF_PATH_B] = GET_PHY_STAT_P0_PWDB_B(phy_status);
+ l_bnd = dm_info->cck_gi_l_bnd;
+ u_bnd = dm_info->cck_gi_u_bnd;
+ gain_a = GET_PHY_STAT_P0_GAIN_A(phy_status);
+ gain_b = GET_PHY_STAT_P0_GAIN_B(phy_status);
+ if (gain_a < l_bnd)
+ rx_power[RF_PATH_A] += (l_bnd - gain_a) << 1;
+ else if (gain_a > u_bnd)
+ rx_power[RF_PATH_A] -= (gain_a - u_bnd) << 1;
+ if (gain_b < l_bnd)
+		rx_power[RF_PATH_B] += (l_bnd - gain_b) << 1;
+ else if (gain_b > u_bnd)
+		rx_power[RF_PATH_B] -= (gain_b - u_bnd) << 1;
+
+ rx_power[RF_PATH_A] -= 110;
+ rx_power[RF_PATH_B] -= 110;
+
+ pkt_stat->rx_power[RF_PATH_A] = max3(rx_power[RF_PATH_A],
+ rx_power[RF_PATH_B],
+ min_rx_power);
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->bw = RTW_CHANNEL_WIDTH_20;
+ pkt_stat->signal_power = max(pkt_stat->rx_power[RF_PATH_A],
+ min_rx_power);
+}
+
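+/* PHY status page 1: the packet bandwidth is derived from the RX
+ * subchannel index reported by the BB.
+ */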
+static void query_phy_status_page1(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 rxsc, bw;
+ s8 min_rx_power = -120;
+
+ if (pkt_stat->rate > DESC_RATE11M && pkt_stat->rate < DESC_RATEMCS0)
+ rxsc = GET_PHY_STAT_P1_L_RXSC(phy_status);
+ else
+ rxsc = GET_PHY_STAT_P1_HT_RXSC(phy_status);
+
+ if (rxsc >= 9 && rxsc <= 12)
+ bw = RTW_CHANNEL_WIDTH_40;
+ else if (rxsc >= 13)
+ bw = RTW_CHANNEL_WIDTH_80;
+ else
+ bw = RTW_CHANNEL_WIDTH_20;
+
+ pkt_stat->rx_power[RF_PATH_A] = GET_PHY_STAT_P1_PWDB_A(phy_status) - 110;
+ pkt_stat->rx_power[RF_PATH_B] = GET_PHY_STAT_P1_PWDB_B(phy_status) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 2);
+ pkt_stat->bw = bw;
+ pkt_stat->signal_power = max3(pkt_stat->rx_power[RF_PATH_A],
+ pkt_stat->rx_power[RF_PATH_B],
+ min_rx_power);
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ u8 page;
+
+ page = *phy_status & 0xf;
+
+ switch (page) {
+ case 0:
+ query_phy_status_page0(rtwdev, phy_status, pkt_stat);
+ break;
+ case 1:
+ query_phy_status_page1(rtwdev, phy_status, pkt_stat);
+ break;
+ default:
+		rtw_warn(rtwdev, "unhandled phy status page (%d)\n", page);
+ return;
+ }
+}
+
+static void rtw8822c_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc);
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = GET_RX_DESC_PPDU_CNT(rx_desc);
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+	/* drv_info_sz is in units of 8 bytes */
+ pkt_stat->drv_info_sz *= 8;
+
+	/* rx/phy status of c2h cmd packets is not of interest */
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+}
+
+static void
+rtw8822c_set_write_tx_power_ref(struct rtw_dev *rtwdev, u8 *tx_pwr_ref_cck,
+ u8 *tx_pwr_ref_ofdm)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u32 txref_cck[2] = {0x18a0, 0x41a0};
+ u32 txref_ofdm[2] = {0x18e8, 0x41e8};
+ u8 path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0);
+ rtw_write32_mask(rtwdev, txref_cck[path], 0x7f0000,
+ tx_pwr_ref_cck[path]);
+ }
+ for (path = 0; path < hal->rf_path_num; path++) {
+ rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0);
+ rtw_write32_mask(rtwdev, txref_ofdm[path], 0x1fc00,
+ tx_pwr_ref_ofdm[path]);
+ }
+}
+
+static void rtw8822c_set_tx_power_diff(struct rtw_dev *rtwdev, u8 rate,
+ s8 *diff_idx)
+{
+ u32 offset_txagc = 0x3a00;
+ u8 rate_idx = rate & 0xfc;
+ u8 pwr_idx[4];
+ u32 phy_pwr_idx;
+ int i;
+
+ for (i = 0; i < 4; i++)
+ pwr_idx[i] = diff_idx[i] & 0x7f;
+
+ phy_pwr_idx = pwr_idx[0] |
+ (pwr_idx[1] << 8) |
+ (pwr_idx[2] << 16) |
+ (pwr_idx[3] << 24);
+
+ rtw_write32_mask(rtwdev, 0x1c90, BIT(15), 0x0);
+ rtw_write32_mask(rtwdev, offset_txagc + rate_idx, MASKDWORD,
+ phy_pwr_idx);
+}
+
+static void rtw8822c_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 rs, rate, j;
+ u8 pwr_ref_cck[2] = {hal->tx_pwr_tbl[RF_PATH_A][DESC_RATE11M],
+ hal->tx_pwr_tbl[RF_PATH_B][DESC_RATE11M]};
+ u8 pwr_ref_ofdm[2] = {hal->tx_pwr_tbl[RF_PATH_A][DESC_RATEMCS7],
+ hal->tx_pwr_tbl[RF_PATH_B][DESC_RATEMCS7]};
+ s8 diff_a, diff_b;
+ u8 pwr_a, pwr_b;
+ s8 diff_idx[4];
+
+ rtw8822c_set_write_tx_power_ref(rtwdev, pwr_ref_cck, pwr_ref_ofdm);
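+	/* Write per-rate power offsets relative to the CCK/OFDM reference
+	 * values, four rates packed into each 32-bit TX AGC register.
+	 */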
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++) {
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_a = hal->tx_pwr_tbl[RF_PATH_A][rate];
+ pwr_b = hal->tx_pwr_tbl[RF_PATH_B][rate];
+ if (rs == 0) {
+ diff_a = (s8)pwr_a - (s8)pwr_ref_cck[0];
+ diff_b = (s8)pwr_b - (s8)pwr_ref_cck[1];
+ } else {
+ diff_a = (s8)pwr_a - (s8)pwr_ref_ofdm[0];
+ diff_b = (s8)pwr_b - (s8)pwr_ref_ofdm[1];
+ }
+ diff_idx[rate % 4] = min(diff_a, diff_b);
+ if (rate % 4 == 3)
+ rtw8822c_set_tx_power_diff(rtwdev, rate - 3,
+ diff_idx);
+ }
+ }
+}
+
+static void rtw8822c_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_ANAPARLDO_POW_MAC);
+ ldo_pwr = enable ? ldo_pwr | BIT_LDOE25_PON : ldo_pwr & ~BIT_LDOE25_PON;
+ rtw_write8(rtwdev, REG_ANAPARLDO_POW_MAC, ldo_pwr);
+}
+
+static void rtw8822c_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_enable;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 ofdm_tx_counter;
+
+ cck_enable = rtw_read32(rtwdev, REG_ENCCK) & BIT_CCK_BLK_EN;
+ cck_fa_cnt = rtw_read16(rtwdev, REG_CCK_FACNT);
+ ofdm_fa_cnt = rtw_read16(rtwdev, REG_OFDM_FACNT);
+ ofdm_tx_counter = rtw_read16(rtwdev, REG_OFDM_TXCNT);
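+	/* the OFDM false alarm counter also counts transmitted frames,
+	 * so exclude them from the false alarm statistics
+	 */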
+ ofdm_fa_cnt -= ofdm_tx_counter;
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt += cck_enable ? cck_fa_cnt : 0;
+
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_CCK_FA_RST, 2);
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_CCANRX, BIT_OFDM_FA_RST, 2);
+ rtw_write32_set(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
+ rtw_write32_clr(rtwdev, REG_CNT_CTRL, BIT_ALL_CNT_RST);
+}
+
+static void rtw8822c_do_iqk(struct rtw_dev *rtwdev)
+{
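+	/* IQ calibration is not implemented for 8822c yet */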
+}
+
+static struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x002E,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x002D,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x007F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4) | BIT(7), 0},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_act_8822c[] = {
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x002E,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0074,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0071,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)),
+ (BIT(7) | BIT(6) | BIT(5))},
+ {0x0061,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6) | BIT(5)), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6)), BIT(7)},
+ {0x00EF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(7) | BIT(6)), BIT(7)},
+ {0x1045,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_act_to_cardemu_8822c[] = {
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ {0x001F,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x00EF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0},
+ {0x1045,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0xFF1A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x30},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8822c[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x00},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x004A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0081,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
+ {0x0090,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x0086,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_SDIO,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0xFFFF,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ 0,
+ RTW_PWR_CMD_END, 0, 0},
+};
+
+static struct rtw_pwr_seq_cmd *card_enable_flow_8822c[] = {
+ trans_carddis_to_cardemu_8822c,
+ trans_cardemu_to_act_8822c,
+ NULL
+};
+
+static struct rtw_pwr_seq_cmd *card_disable_flow_8822c[] = {
+ trans_act_to_cardemu_8822c,
+ trans_cardemu_to_carddis_8822c,
+ NULL
+};
+
+static struct rtw_intf_phy_para usb2_param_8822c[] = {
+ {0xFFFF, 0x00,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para usb3_param_8822c[] = {
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen1_param_8822c[] = {
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para pcie_gen2_param_8822c[] = {
+ {0xFFFF, 0x0000,
+ RTW_IP_SEL_PHY,
+ RTW_INTF_PHY_CUT_ALL,
+ RTW_INTF_PHY_PLATFORM_ALL},
+};
+
+static struct rtw_intf_phy_para_table phy_para_table_8822c = {
+ .usb2_para = usb2_param_8822c,
+ .usb3_para = usb3_param_8822c,
+ .gen1_para = pcie_gen1_param_8822c,
+ .gen2_para = pcie_gen2_param_8822c,
+ .n_usb2_para = ARRAY_SIZE(usb2_param_8822c),
+	.n_usb3_para = ARRAY_SIZE(usb3_param_8822c),
+ .n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8822c),
+ .n_gen2_para = ARRAY_SIZE(pcie_gen2_param_8822c),
+};
+
+static const struct rtw_rfe_def rtw8822c_rfe_defs[] = {
+ [0] = RTW_DEF_RFE(8822c, 0, 0),
+ [1] = RTW_DEF_RFE(8822c, 0, 0),
+ [2] = RTW_DEF_RFE(8822c, 0, 0),
+};
+
+static struct rtw_hw_reg rtw8822c_dig[] = {
+ [0] = { .addr = 0x1d70, .mask = 0x7f },
+ [1] = { .addr = 0x1d70, .mask = 0x7f00 },
+};
+
+static struct rtw_page_table page_table_8822c[] = {
+ {64, 64, 64, 64, 1},
+ {64, 64, 64, 64, 1},
+ {64, 64, 0, 0, 1},
+ {64, 64, 64, 0, 1},
+ {64, 64, 64, 64, 1},
+};
+
+static struct rtw_rqpn rqpn_table_8822c[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+static struct rtw_chip_ops rtw8822c_ops = {
+ .phy_set_param = rtw8822c_phy_set_param,
+ .read_efuse = rtw8822c_read_efuse,
+ .query_rx_desc = rtw8822c_query_rx_desc,
+ .set_channel = rtw8822c_set_channel,
+ .mac_init = rtw8822c_mac_init,
+ .read_rf = rtw_phy_read_rf,
+ .write_rf = rtw_phy_write_rf_reg_mix,
+ .set_tx_power_index = rtw8822c_set_tx_power_index,
+ .cfg_ldo25 = rtw8822c_cfg_ldo25,
+ .false_alarm_statistics = rtw8822c_false_alarm_statistics,
+ .do_iqk = rtw8822c_do_iqk,
+};
+
+struct rtw_chip_info rtw8822c_hw_spec = {
+ .ops = &rtw8822c_ops,
+ .id = RTW_CHIP_TYPE_8822C,
+ .fw_name = "rtw88/rtw8822c_fw.bin",
+ .tx_pkt_desc_sz = 48,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 512,
+ .log_efuse_size = 768,
+ .ptct_efuse_size = 124,
+ .txff_size = 262144,
+ .rxff_size = 24576,
+ .txgi_factor = 2,
+ .is_pwr_by_rate_dec = false,
+ .max_power_index = 0x7f,
+ .csi_buf_pg_num = 50,
+ .band = RTW_BAND_2G | RTW_BAND_5G,
+ .page_size = 128,
+ .dig_min = 0x20,
+ .ht_supported = true,
+ .vht_supported = true,
+ .sys_func_en = 0xD8,
+ .pwr_on_seq = card_enable_flow_8822c,
+ .pwr_off_seq = card_disable_flow_8822c,
+ .page_table = page_table_8822c,
+ .rqpn_table = rqpn_table_8822c,
+ .intf_table = &phy_para_table_8822c,
+ .dig = rtw8822c_dig,
+ .rf_base_addr = {0x3c00, 0x4c00},
+ .rf_sipi_addr = {0x1808, 0x4108},
+ .mac_tbl = &rtw8822c_mac_tbl,
+ .agc_tbl = &rtw8822c_agc_tbl,
+ .bb_tbl = &rtw8822c_bb_tbl,
+ .rfk_init_tbl = &rtw8822c_array_mp_cal_init_tbl,
+ .rf_tbl = {&rtw8822c_rf_a_tbl, &rtw8822c_rf_b_tbl},
+ .rfe_defs = rtw8822c_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8822c_rfe_defs),
+};
+EXPORT_SYMBOL(rtw8822c_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8822c_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
new file mode 100644
index 000000000000..d3bd9850baa0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822C_H__
+#define __RTW8822C_H__
+
+#include <asm/byteorder.h>
+
+struct rtw8822cu_efuse {
+ u8 res0[0x30]; /* 0x120 */
+ u8 vid[2]; /* 0x150 */
+ u8 pid[2];
+ u8 res1[3];
+ u8 mac_addr[ETH_ALEN]; /* 0x157 */
+ u8 res2[0x3d];
+};
+
+struct rtw8822ce_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0x120 */
+ u8 vender_id[2];
+ u8 device_id[2];
+ u8 sub_vender_id[2];
+ u8 sub_device_id[2];
+ u8 pmc[2];
+ u8 exp_device_cap[2];
+ u8 msi_cap;
+ u8 ltr_cap; /* 0x133 */
+ u8 exp_link_control[2];
+ u8 link_cap[4];
+ u8 link_control[2];
+ u8 serial_number[8];
+ u8 res0:2; /* 0x144 */
+ u8 ltr_en:1;
+ u8 res1:2;
+ u8 obff:2;
+ u8 res2:3;
+ u8 obff_cap:2;
+ u8 res3:4;
+ u8 class_code[3];
+ u8 res4;
+ u8 pci_pm_L1_2_supp:1;
+ u8 pci_pm_L1_1_supp:1;
+ u8 aspm_pm_L1_2_supp:1;
+ u8 aspm_pm_L1_1_supp:1;
+ u8 L1_pm_substates_supp:1;
+ u8 res5:3;
+ u8 port_common_mode_restore_time;
+ u8 port_t_power_on_scale:2;
+ u8 res6:1;
+ u8 port_t_power_on_value:5;
+ u8 res7;
+};
+
+struct rtw8822c_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 res1;
+ u8 iqk_lck;
+ u8 res2[5]; /* 0xbc */
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 tx_bb_swing_setting_5g;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res3[3];
+ u8 path_a_thermal; /* 0xd0 */
+ u8 path_b_thermal;
+ u8 res4[2];
+ u8 rx_gain_gap_2g_ofdm;
+ u8 res5;
+ u8 rx_gain_gap_2g_cck;
+ u8 res6;
+ u8 rx_gain_gap_5gl;
+ u8 res7;
+ u8 rx_gain_gap_5gm;
+ u8 res8;
+ u8 rx_gain_gap_5gh;
+ u8 res9;
+ u8 res10[0x42];
+ union {
+ struct rtw8822cu_efuse u;
+ struct rtw8822ce_efuse e;
+ };
+};
+
+#define DACK_PATH_8822C 2
+#define DACK_REG_8822C 16
+#define DACK_RF_8822C 1
+#define DACK_SN_8822C 100
+
+/* phy status page0 */
+#define GET_PHY_STAT_P0_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P0_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0))
+#define GET_PHY_STAT_P0_GAIN_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(21, 16))
+#define GET_PHY_STAT_P0_GAIN_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(29, 24))
+
+/* phy status page1 */
+#define GET_PHY_STAT_P1_PWDB_A(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(15, 8))
+#define GET_PHY_STAT_P1_PWDB_B(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x00), GENMASK(23, 16))
+#define GET_PHY_STAT_P1_L_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8))
+#define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \
+ le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12))
+
+#define REG_ANAPARLDO_POW_MAC 0x0029
+#define BIT_LDOE25_PON BIT(0)
+#define REG_RRSR 0x0440
+#define BITS_RRSR_RSC (BIT(21) | BIT(22))
+
+#define REG_TXDFIR0 0x808
+#define REG_DFIRBW 0x810
+#define REG_ANTMAP0 0x820
+#define REG_ANTMAP 0x824
+#define REG_DYMPRITH 0x86c
+#define REG_DYMENTH0 0x870
+#define REG_DYMENTH 0x874
+#define REG_DYMTHMIN 0x8a4
+#define REG_TXBWCTL 0x9b0
+#define REG_TXCLK 0x9b4
+#define REG_SCOTRK 0xc30
+#define REG_MRCM 0xc38
+#define REG_AGCSWSH 0xc44
+#define REG_ANTWTPD 0xc54
+#define REG_ORITXCODE 0x1800
+#define REG_3WIRE 0x180c
+#define BIT_3WIRE_TX_EN BIT(0)
+#define BIT_3WIRE_RX_EN BIT(1)
+#define BIT_3WIRE_PI_ON BIT(28)
+#define REG_RXAGCCTL0 0x18ac
+#define REG_CCKSB 0x1a00
+#define REG_RXCCKSEL 0x1a04
+#define REG_BGCTRL 0x1a14
+#define BITS_RX_IQ_WEIGHT (BIT(8) | BIT(9))
+#define REG_TXF0 0x1a20
+#define REG_TXF1 0x1a24
+#define REG_TXF2 0x1a28
+#define REG_CCANRX 0x1a2c
+#define BIT_CCK_FA_RST (BIT(14) | BIT(15))
+#define BIT_OFDM_FA_RST (BIT(12) | BIT(13))
+#define REG_CCK_FACNT 0x1a5c
+#define REG_CCKTXONLY 0x1a80
+#define BIT_BB_CCK_CHECK_EN BIT(18)
+#define REG_TXF3 0x1a98
+#define REG_TXF4 0x1a9c
+#define REG_TXF5 0x1aa0
+#define REG_TXF6 0x1aac
+#define REG_TXF7 0x1ab0
+#define REG_TXANT 0x1c28
+#define REG_ENCCK 0x1c3c
+#define BIT_CCK_BLK_EN BIT(1)
+#define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1))
+#define REG_CCAMSK 0x1c80
+#define REG_RXFNCTL 0x1d30
+#define REG_RXIGI 0x1d70
+#define REG_ENFN 0x1e24
+#define REG_TXANTSEG 0x1e28
+#define REG_TXLGMAP 0x1e2c
+#define REG_CCKPATH 0x1e5c
+#define REG_CNT_CTRL 0x1eb4
+#define BIT_ALL_CNT_RST BIT(25)
+#define REG_OFDM_FACNT 0x2d00
+#define REG_OFDM_TXCNT 0x2de0
+#define REG_ORITXCODE2 0x4100
+#define REG_3WIRE2 0x410c
+#define REG_RXAGCCTL 0x41ac
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
new file mode 100644
index 000000000000..49044f510c6c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -0,0 +1,11753 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8822c_table.h"
+
+static const u32 rtw8822c_mac[] = {
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_mac, rtw_phy_cfg_mac);
+
+static const u32 rtw8822c_agc[] = {
+ 0x1D90, 0x300001FF,
+ 0x1D90, 0x300101FF,
+ 0x1D90, 0x300201FE,
+ 0x1D90, 0x300301FD,
+ 0x1D90, 0x300401FC,
+ 0x1D90, 0x300501FB,
+ 0x1D90, 0x300601FA,
+ 0x1D90, 0x300701F9,
+ 0x1D90, 0x300801F8,
+ 0x1D90, 0x300901F7,
+ 0x1D90, 0x300A01F6,
+ 0x1D90, 0x300B01F5,
+ 0x1D90, 0x300C01F4,
+ 0x1D90, 0x300D01F3,
+ 0x1D90, 0x300E01F2,
+ 0x1D90, 0x300F01F1,
+ 0x1D90, 0x301001F0,
+ 0x1D90, 0x301101EF,
+ 0x1D90, 0x301201EE,
+ 0x1D90, 0x301301ED,
+ 0x1D90, 0x301401EC,
+ 0x1D90, 0x301501EB,
+ 0x1D90, 0x30160192,
+ 0x1D90, 0x30170191,
+ 0x1D90, 0x30180190,
+ 0x1D90, 0x3019018F,
+ 0x1D90, 0x301A018E,
+ 0x1D90, 0x301B018D,
+ 0x1D90, 0x301C018C,
+ 0x1D90, 0x301D018B,
+ 0x1D90, 0x301E018A,
+ 0x1D90, 0x301F0189,
+ 0x1D90, 0x30200188,
+ 0x1D90, 0x30210187,
+ 0x1D90, 0x30220186,
+ 0x1D90, 0x30230185,
+ 0x1D90, 0x3024014B,
+ 0x1D90, 0x3025014A,
+ 0x1D90, 0x30260149,
+ 0x1D90, 0x30270148,
+ 0x1D90, 0x30280147,
+ 0x1D90, 0x30290146,
+ 0x1D90, 0x302A0145,
+ 0x1D90, 0x302B0144,
+ 0x1D90, 0x302C0143,
+ 0x1D90, 0x302D0142,
+ 0x1D90, 0x302E00C8,
+ 0x1D90, 0x302F00C7,
+ 0x1D90, 0x303000C6,
+ 0x1D90, 0x303100C5,
+ 0x1D90, 0x303200C4,
+ 0x1D90, 0x30330088,
+ 0x1D90, 0x30340087,
+ 0x1D90, 0x30350086,
+ 0x1D90, 0x30360045,
+ 0x1D90, 0x30370044,
+ 0x1D90, 0x30380043,
+ 0x1D90, 0x30390023,
+ 0x1D90, 0x303A0022,
+ 0x1D90, 0x303B0021,
+ 0x1D90, 0x303C0020,
+ 0x1D90, 0x303D0002,
+ 0x1D90, 0x303E0001,
+ 0x1D90, 0x303F0000,
+ 0x1D90, 0x304000FF,
+ 0x1D90, 0x304100FF,
+ 0x1D90, 0x304200FF,
+ 0x1D90, 0x304300FF,
+ 0x1D90, 0x304400FE,
+ 0x1D90, 0x304500FD,
+ 0x1D90, 0x304600FC,
+ 0x1D90, 0x304700FB,
+ 0x1D90, 0x304800FA,
+ 0x1D90, 0x304900F9,
+ 0x1D90, 0x304A00F8,
+ 0x1D90, 0x304B00F7,
+ 0x1D90, 0x304C00F6,
+ 0x1D90, 0x304D00F5,
+ 0x1D90, 0x304E00F4,
+ 0x1D90, 0x304F00F3,
+ 0x1D90, 0x305000F2,
+ 0x1D90, 0x305100F1,
+ 0x1D90, 0x305200F0,
+ 0x1D90, 0x305300EF,
+ 0x1D90, 0x305400EE,
+ 0x1D90, 0x305500ED,
+ 0x1D90, 0x305600EC,
+ 0x1D90, 0x305700EB,
+ 0x1D90, 0x305800EA,
+ 0x1D90, 0x305900E9,
+ 0x1D90, 0x305A00E8,
+ 0x1D90, 0x305B00E7,
+ 0x1D90, 0x305C00E6,
+ 0x1D90, 0x305D00C7,
+ 0x1D90, 0x305E00C6,
+ 0x1D90, 0x305F00C5,
+ 0x1D90, 0x306000C4,
+ 0x1D90, 0x306100C3,
+ 0x1D90, 0x306200C2,
+ 0x1D90, 0x306300A4,
+ 0x1D90, 0x306400A3,
+ 0x1D90, 0x306500A2,
+ 0x1D90, 0x30660086,
+ 0x1D90, 0x30670085,
+ 0x1D90, 0x30680084,
+ 0x1D90, 0x30690083,
+ 0x1D90, 0x306A0082,
+ 0x1D90, 0x306B0069,
+ 0x1D90, 0x306C0068,
+ 0x1D90, 0x306D0067,
+ 0x1D90, 0x306E0066,
+ 0x1D90, 0x306F0065,
+ 0x1D90, 0x30700064,
+ 0x1D90, 0x30710063,
+ 0x1D90, 0x30720044,
+ 0x1D90, 0x30730043,
+ 0x1D90, 0x30740042,
+ 0x1D90, 0x30750025,
+ 0x1D90, 0x30760024,
+ 0x1D90, 0x30770023,
+ 0x1D90, 0x30780022,
+ 0x1D90, 0x30790021,
+ 0x1D90, 0x307A0020,
+ 0x1D90, 0x307B0003,
+ 0x1D90, 0x307C0002,
+ 0x1D90, 0x307D0001,
+ 0x1D90, 0x307E0000,
+ 0x1D90, 0x307F0000,
+ 0x1D90, 0x308000FF,
+ 0x1D90, 0x308100FF,
+ 0x1D90, 0x308200FF,
+ 0x1D90, 0x308300FF,
+ 0x1D90, 0x308400FE,
+ 0x1D90, 0x308500FD,
+ 0x1D90, 0x308600FC,
+ 0x1D90, 0x308700FB,
+ 0x1D90, 0x308800FA,
+ 0x1D90, 0x308900F9,
+ 0x1D90, 0x308A00F8,
+ 0x1D90, 0x308B00F7,
+ 0x1D90, 0x308C00F6,
+ 0x1D90, 0x308D00F5,
+ 0x1D90, 0x308E00F4,
+ 0x1D90, 0x308F00F3,
+ 0x1D90, 0x309000F2,
+ 0x1D90, 0x309100F1,
+ 0x1D90, 0x309200F0,
+ 0x1D90, 0x309300EF,
+ 0x1D90, 0x309400EE,
+ 0x1D90, 0x309500ED,
+ 0x1D90, 0x309600EC,
+ 0x1D90, 0x309700EB,
+ 0x1D90, 0x309800EA,
+ 0x1D90, 0x309900E9,
+ 0x1D90, 0x309A00E8,
+ 0x1D90, 0x309B00E7,
+ 0x1D90, 0x309C00E6,
+ 0x1D90, 0x309D00C7,
+ 0x1D90, 0x309E00C6,
+ 0x1D90, 0x309F00C5,
+ 0x1D90, 0x30A000C4,
+ 0x1D90, 0x30A100C3,
+ 0x1D90, 0x30A200C2,
+ 0x1D90, 0x30A300A4,
+ 0x1D90, 0x30A400A3,
+ 0x1D90, 0x30A500A2,
+ 0x1D90, 0x30A60086,
+ 0x1D90, 0x30A70085,
+ 0x1D90, 0x30A80084,
+ 0x1D90, 0x30A90083,
+ 0x1D90, 0x30AA0082,
+ 0x1D90, 0x30AB0069,
+ 0x1D90, 0x30AC0068,
+ 0x1D90, 0x30AD0067,
+ 0x1D90, 0x30AE0066,
+ 0x1D90, 0x30AF0065,
+ 0x1D90, 0x30B00064,
+ 0x1D90, 0x30B10063,
+ 0x1D90, 0x30B20044,
+ 0x1D90, 0x30B30043,
+ 0x1D90, 0x30B40042,
+ 0x1D90, 0x30B50025,
+ 0x1D90, 0x30B60024,
+ 0x1D90, 0x30B70023,
+ 0x1D90, 0x30B80022,
+ 0x1D90, 0x30B90021,
+ 0x1D90, 0x30BA0020,
+ 0x1D90, 0x30BB0003,
+ 0x1D90, 0x30BC0002,
+ 0x1D90, 0x30BD0001,
+ 0x1D90, 0x30BE0000,
+ 0x1D90, 0x30BF0000,
+ 0x1D90, 0x30C000FF,
+ 0x1D90, 0x30C100FF,
+ 0x1D90, 0x30C200FF,
+ 0x1D90, 0x30C300FF,
+ 0x1D90, 0x30C400FE,
+ 0x1D90, 0x30C500FD,
+ 0x1D90, 0x30C600FC,
+ 0x1D90, 0x30C700FB,
+ 0x1D90, 0x30C800FA,
+ 0x1D90, 0x30C900F9,
+ 0x1D90, 0x30CA00F8,
+ 0x1D90, 0x30CB00F7,
+ 0x1D90, 0x30CC00F6,
+ 0x1D90, 0x30CD00F5,
+ 0x1D90, 0x30CE00F4,
+ 0x1D90, 0x30CF00F3,
+ 0x1D90, 0x30D000F2,
+ 0x1D90, 0x30D100F1,
+ 0x1D90, 0x30D200F0,
+ 0x1D90, 0x30D300EF,
+ 0x1D90, 0x30D400EE,
+ 0x1D90, 0x30D500ED,
+ 0x1D90, 0x30D600EC,
+ 0x1D90, 0x30D700EB,
+ 0x1D90, 0x30D800EA,
+ 0x1D90, 0x30D900E9,
+ 0x1D90, 0x30DA00E8,
+ 0x1D90, 0x30DB00E7,
+ 0x1D90, 0x30DC00E6,
+ 0x1D90, 0x30DD00C7,
+ 0x1D90, 0x30DE00C6,
+ 0x1D90, 0x30DF00C5,
+ 0x1D90, 0x30E000C4,
+ 0x1D90, 0x30E100C3,
+ 0x1D90, 0x30E200C2,
+ 0x1D90, 0x30E300A4,
+ 0x1D90, 0x30E400A3,
+ 0x1D90, 0x30E500A2,
+ 0x1D90, 0x30E60086,
+ 0x1D90, 0x30E70085,
+ 0x1D90, 0x30E80084,
+ 0x1D90, 0x30E90083,
+ 0x1D90, 0x30EA0082,
+ 0x1D90, 0x30EB0069,
+ 0x1D90, 0x30EC0068,
+ 0x1D90, 0x30ED0067,
+ 0x1D90, 0x30EE0066,
+ 0x1D90, 0x30EF0065,
+ 0x1D90, 0x30F00064,
+ 0x1D90, 0x30F10063,
+ 0x1D90, 0x30F20044,
+ 0x1D90, 0x30F30043,
+ 0x1D90, 0x30F40042,
+ 0x1D90, 0x30F50025,
+ 0x1D90, 0x30F60024,
+ 0x1D90, 0x30F70023,
+ 0x1D90, 0x30F80022,
+ 0x1D90, 0x30F90021,
+ 0x1D90, 0x30FA0020,
+ 0x1D90, 0x30FB0003,
+ 0x1D90, 0x30FC0002,
+ 0x1D90, 0x30FD0001,
+ 0x1D90, 0x30FE0000,
+ 0x1D90, 0x30FF0000,
+ 0x1D90, 0x310001FF,
+ 0x1D90, 0x310101FF,
+ 0x1D90, 0x310201FF,
+ 0x1D90, 0x310301FF,
+ 0x1D90, 0x310401FF,
+ 0x1D90, 0x310501FF,
+ 0x1D90, 0x310601FF,
+ 0x1D90, 0x310701FF,
+ 0x1D90, 0x310801FF,
+ 0x1D90, 0x310901FE,
+ 0x1D90, 0x310A01FD,
+ 0x1D90, 0x310B01FC,
+ 0x1D90, 0x310C01FB,
+ 0x1D90, 0x310D01FA,
+ 0x1D90, 0x310E01F9,
+ 0x1D90, 0x310F01F8,
+ 0x1D90, 0x311001F7,
+ 0x1D90, 0x311101F6,
+ 0x1D90, 0x311201F5,
+ 0x1D90, 0x311301F4,
+ 0x1D90, 0x311401F3,
+ 0x1D90, 0x311501F2,
+ 0x1D90, 0x311601F1,
+ 0x1D90, 0x311701F0,
+ 0x1D90, 0x311801EF,
+ 0x1D90, 0x311901EE,
+ 0x1D90, 0x311A01ED,
+ 0x1D90, 0x311B01EC,
+ 0x1D90, 0x311C01EB,
+ 0x1D90, 0x311D0192,
+ 0x1D90, 0x311E0191,
+ 0x1D90, 0x311F0190,
+ 0x1D90, 0x3120018F,
+ 0x1D90, 0x3121018E,
+ 0x1D90, 0x3122018D,
+ 0x1D90, 0x3123018C,
+ 0x1D90, 0x3124018B,
+ 0x1D90, 0x3125018A,
+ 0x1D90, 0x31260189,
+ 0x1D90, 0x31270188,
+ 0x1D90, 0x31280187,
+ 0x1D90, 0x31290186,
+ 0x1D90, 0x312A0185,
+ 0x1D90, 0x312B0149,
+ 0x1D90, 0x312C0148,
+ 0x1D90, 0x312D0147,
+ 0x1D90, 0x312E0146,
+ 0x1D90, 0x312F0145,
+ 0x1D90, 0x31300144,
+ 0x1D90, 0x31310143,
+ 0x1D90, 0x31320142,
+ 0x1D90, 0x31330141,
+ 0x1D90, 0x31340140,
+ 0x1D90, 0x313500C7,
+ 0x1D90, 0x313600C6,
+ 0x1D90, 0x313700C5,
+ 0x1D90, 0x313800C4,
+ 0x1D90, 0x313900C3,
+ 0x1D90, 0x313A0088,
+ 0x1D90, 0x313B0087,
+ 0x1D90, 0x313C0086,
+ 0x1D90, 0x313D0045,
+ 0x1D90, 0x313E0044,
+ 0x1D90, 0x313F0043,
+ 0x1D90, 0x314001FF,
+ 0x1D90, 0x314101FF,
+ 0x1D90, 0x314201FF,
+ 0x1D90, 0x314301FF,
+ 0x1D90, 0x314401FF,
+ 0x1D90, 0x314501FF,
+ 0x1D90, 0x314601FF,
+ 0x1D90, 0x314701FE,
+ 0x1D90, 0x314801FD,
+ 0x1D90, 0x314901FC,
+ 0x1D90, 0x314A01FB,
+ 0x1D90, 0x314B01FA,
+ 0x1D90, 0x314C01F9,
+ 0x1D90, 0x314D01F8,
+ 0x1D90, 0x314E01F7,
+ 0x1D90, 0x314F01F6,
+ 0x1D90, 0x315001F5,
+ 0x1D90, 0x315101F4,
+ 0x1D90, 0x315201F3,
+ 0x1D90, 0x315301F2,
+ 0x1D90, 0x315401F1,
+ 0x1D90, 0x315501F0,
+ 0x1D90, 0x315601EF,
+ 0x1D90, 0x315701EE,
+ 0x1D90, 0x315801ED,
+ 0x1D90, 0x315901EC,
+ 0x1D90, 0x315A01EB,
+ 0x1D90, 0x315B01EA,
+ 0x1D90, 0x315C01E9,
+ 0x1D90, 0x315D018F,
+ 0x1D90, 0x315E018E,
+ 0x1D90, 0x315F018D,
+ 0x1D90, 0x3160018C,
+ 0x1D90, 0x3161018B,
+ 0x1D90, 0x3162018A,
+ 0x1D90, 0x31630189,
+ 0x1D90, 0x31640188,
+ 0x1D90, 0x31650187,
+ 0x1D90, 0x31660186,
+ 0x1D90, 0x31670185,
+ 0x1D90, 0x31680184,
+ 0x1D90, 0x31690183,
+ 0x1D90, 0x316A0182,
+ 0x1D90, 0x316B0149,
+ 0x1D90, 0x316C0148,
+ 0x1D90, 0x316D0147,
+ 0x1D90, 0x316E0146,
+ 0x1D90, 0x316F0145,
+ 0x1D90, 0x31700144,
+ 0x1D90, 0x31710143,
+ 0x1D90, 0x31720142,
+ 0x1D90, 0x31730141,
+ 0x1D90, 0x31740140,
+ 0x1D90, 0x317500C7,
+ 0x1D90, 0x317600C6,
+ 0x1D90, 0x317700C5,
+ 0x1D90, 0x317800C4,
+ 0x1D90, 0x317900C3,
+ 0x1D90, 0x317A0088,
+ 0x1D90, 0x317B0087,
+ 0x1D90, 0x317C0086,
+ 0x1D90, 0x317D0045,
+ 0x1D90, 0x317E0044,
+ 0x1D90, 0x317F0043,
+ 0x1D90, 0x318001FE,
+ 0x1D90, 0x318101FD,
+ 0x1D90, 0x318201FC,
+ 0x1D90, 0x318301FB,
+ 0x1D90, 0x318401FA,
+ 0x1D90, 0x318501F9,
+ 0x1D90, 0x318601F8,
+ 0x1D90, 0x318701F7,
+ 0x1D90, 0x318801F6,
+ 0x1D90, 0x318901F5,
+ 0x1D90, 0x318A01F4,
+ 0x1D90, 0x318B01F3,
+ 0x1D90, 0x318C01F2,
+ 0x1D90, 0x318D01F1,
+ 0x1D90, 0x318E01F0,
+ 0x1D90, 0x318F01EF,
+ 0x1D90, 0x319001EE,
+ 0x1D90, 0x319101ED,
+ 0x1D90, 0x319201EC,
+ 0x1D90, 0x319301EB,
+ 0x1D90, 0x319401EA,
+ 0x1D90, 0x319501E9,
+ 0x1D90, 0x3196018F,
+ 0x1D90, 0x3197018E,
+ 0x1D90, 0x3198018D,
+ 0x1D90, 0x3199018C,
+ 0x1D90, 0x319A018B,
+ 0x1D90, 0x319B018A,
+ 0x1D90, 0x319C0189,
+ 0x1D90, 0x319D0188,
+ 0x1D90, 0x319E0187,
+ 0x1D90, 0x319F0186,
+ 0x1D90, 0x31A00185,
+ 0x1D90, 0x31A10184,
+ 0x1D90, 0x31A20183,
+ 0x1D90, 0x31A30182,
+ 0x1D90, 0x31A40149,
+ 0x1D90, 0x31A50148,
+ 0x1D90, 0x31A60147,
+ 0x1D90, 0x31A70146,
+ 0x1D90, 0x31A80145,
+ 0x1D90, 0x31A90144,
+ 0x1D90, 0x31AA0143,
+ 0x1D90, 0x31AB0142,
+ 0x1D90, 0x31AC0141,
+ 0x1D90, 0x31AD0140,
+ 0x1D90, 0x31AE00C7,
+ 0x1D90, 0x31AF00C6,
+ 0x1D90, 0x31B000C5,
+ 0x1D90, 0x31B100C4,
+ 0x1D90, 0x31B200C3,
+ 0x1D90, 0x31B30088,
+ 0x1D90, 0x31B40087,
+ 0x1D90, 0x31B50086,
+ 0x1D90, 0x31B60045,
+ 0x1D90, 0x31B70044,
+ 0x1D90, 0x31B80043,
+ 0x1D90, 0x31B90023,
+ 0x1D90, 0x31BA0022,
+ 0x1D90, 0x31BB0021,
+ 0x1D90, 0x31BC0020,
+ 0x1D90, 0x31BD0002,
+ 0x1D90, 0x31BE0001,
+ 0x1D90, 0x31BF0000,
+ 0x1D70, 0x22222222,
+ 0x1D70, 0x20202020,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_agc, rtw_phy_cfg_agc);
+
+static const u32 rtw8822c_bb[] = {
+ 0x1D0C, 0x00410000,
+ 0x1C3C, 0x01038040,
+ 0x1C90, 0x00E49708,
+ 0x800, 0x00000000,
+ 0x804, 0xD6300000,
+ 0x808, 0x60956093,
+ 0x80C, 0x00000025,
+ 0x810, 0x11B019B0,
+ 0x814, 0x00904080,
+ 0x818, 0xC30056F1,
+ 0x81C, 0x00050000,
+ 0x820, 0x11111133,
+ 0x824, 0xC3C3CCC4,
+ 0x828, 0x30FB186C,
+ 0x82C, 0x185D6556,
+ 0x830, 0x1751145B,
+ 0x834, 0x776995D7,
+ 0x838, 0x74777A7D,
+ 0x83C, 0xF9AA9982,
+ 0x840, 0x89AA9ABB,
+ 0x844, 0x0DEEDDC1,
+ 0x848, 0xCDEEDEFF,
+ 0x84C, 0xFFFF5555,
+ 0x850, 0x6F7A727D,
+ 0x854, 0x6C776F7A,
+ 0x858, 0x6F7A6C77,
+ 0x85C, 0x69746974,
+ 0x860, 0x6F7A6C77,
+ 0x864, 0x6C776C77,
+ 0x868, 0x727D6F7A,
+ 0x86C, 0x69D7B196,
+ 0x870, 0x1A6D769B,
+ 0x874, 0x55823917,
+ 0x878, 0x00C025BD,
+ 0x87C, 0x4140557D,
+ 0x880, 0x9A1D9D47,
+ 0x884, 0x1DE7134F,
+ 0x888, 0x2857A857,
+ 0x88C, 0x520E8A24,
+ 0x890, 0x8F628C44,
+ 0x894, 0x72745F43,
+ 0x898, 0x03F02F0D,
+ 0x89C, 0x5DB6886F,
+ 0x8A0, 0x07DC309F,
+ 0x8A4, 0x09412495,
+ 0x8A8, 0x222222A9,
+ 0x8AC, 0x89628C44,
+ 0x8B0, 0x72745F43,
+ 0x8B4, 0x03F02F0D,
+ 0x8B8, 0x55B6886F,
+ 0x8BC, 0x07D0309F,
+ 0x8C0, 0x70404023,
+ 0x8C4, 0x00440001,
+ 0x8C8, 0x7A7A2E26,
+ 0x8CC, 0x25297777,
+ 0x8D0, 0x6CEB6DCE,
+ 0x8D4, 0x0005A632,
+ 0x8D8, 0x00000000,
+ 0x8DC, 0x00000000,
+ 0x8E0, 0x00000000,
+ 0x8E4, 0x00000000,
+ 0x8E8, 0x00000000,
+ 0x8EC, 0x00000000,
+ 0x8F0, 0x00000000,
+ 0x8F4, 0x00000000,
+ 0x8F8, 0x25239843,
+ 0x900, 0x00000000,
+ 0x904, 0x00000000,
+ 0x908, 0x000008CB,
+ 0x90C, 0x00000000,
+ 0x910, 0x00000000,
+ 0x914, 0x20000000,
+ 0x918, 0x20000000,
+ 0x91C, 0x20000000,
+ 0x920, 0x20000000,
+ 0x924, 0x00000000,
+ 0x928, 0x0000003A,
+ 0x92C, 0x0000003A,
+ 0x930, 0x0000003A,
+ 0x934, 0x0000003A,
+ 0x938, 0x0000000F,
+ 0x93C, 0x00000000,
+ 0x940, 0x4E1F3E81,
+ 0x944, 0x4E1F3E81,
+ 0x948, 0x4E1F3E81,
+ 0x94C, 0x4E1F3E81,
+ 0x950, 0x03020100,
+ 0x954, 0x07060504,
+ 0x958, 0x0B0A0908,
+ 0x95C, 0x0F0E0D0C,
+ 0x960, 0x13121110,
+ 0x964, 0x17161514,
+ 0x968, 0x03020100,
+ 0x96C, 0x07060504,
+ 0x970, 0x0B0A0908,
+ 0x974, 0x0F0E0D0C,
+ 0x978, 0x13121110,
+ 0x97C, 0x17161514,
+ 0x980, 0x03020100,
+ 0x984, 0x07060504,
+ 0x988, 0x0B0A0908,
+ 0x98C, 0x0F0E0D0C,
+ 0x990, 0x13121110,
+ 0x994, 0x17161514,
+ 0x998, 0x03020100,
+ 0x99C, 0x07060504,
+ 0x9A0, 0x0B0A0908,
+ 0x9A4, 0x0F0E0D0C,
+ 0x9A8, 0x13121110,
+ 0x9AC, 0x17161514,
+ 0x9B0, 0x00002200,
+ 0x9B4, 0xDB6FFF00,
+ 0x9B8, 0x00400064,
+ 0x9BC, 0x00000000,
+ 0x9C0, 0x01010101,
+ 0x9C4, 0x00640064,
+ 0x9C8, 0x00640064,
+ 0x9CC, 0x00007777,
+ 0x9D0, 0x00000000,
+ 0x9D4, 0x00000000,
+ 0x9D8, 0x00000000,
+ 0x9DC, 0x00000000,
+ 0x9E0, 0x00000000,
+ 0x9E4, 0x00000000,
+ 0x9E8, 0x00000000,
+ 0x9EC, 0x00000000,
+ 0x9F0, 0x100024E0,
+ 0x9F4, 0x00000000,
+ 0x9F8, 0x00000000,
+ 0xA00, 0x02001208,
+ 0xA04, 0x00000000,
+ 0xA08, 0x00000000,
+ 0xA0C, 0x00000000,
+ 0xA10, 0x00000000,
+ 0xA14, 0x00000000,
+ 0xA18, 0x00000000,
+ 0xA1C, 0x00000000,
+ 0xA20, 0xEB31B333,
+ 0xA24, 0x00275485,
+ 0xA28, 0x00166366,
+ 0xA2C, 0x00275485,
+ 0xA30, 0x00166366,
+ 0xA34, 0x00275485,
+ 0xA38, 0x00200400,
+ 0xA3C, 0x00200400,
+ 0xA40, 0xB35DC5BD,
+ 0xA44, 0x3033BEBD,
+ 0xA48, 0x2A521254,
+ 0xA4C, 0xA2733345,
+ 0xA50, 0x617BE003,
+ 0xA54, 0x50000968,
+ 0xA58, 0x00020000,
+ 0xA5C, 0x01000000,
+ 0xA60, 0x02000000,
+ 0xA64, 0x03000000,
+ 0xA68, 0x00020000,
+ 0xA6C, 0x00000000,
+ 0xA70, 0x00000000,
+ 0xA74, 0x00000000,
+ 0xA78, 0x00000000,
+ 0xA7C, 0x00000000,
+ 0xA80, 0x00000000,
+ 0xA84, 0x00000000,
+ 0xA88, 0x00000000,
+ 0xA8C, 0x00000000,
+ 0xA90, 0x00000000,
+ 0xA94, 0x00000000,
+ 0xA98, 0x00000000,
+ 0xA9C, 0x00000000,
+ 0xAA0, 0x00000000,
+ 0xAA4, 0x00000000,
+ 0xAA8, 0x00000000,
+ 0xAAC, 0x00000000,
+ 0xAB0, 0x00000000,
+ 0xAB4, 0x00000000,
+ 0xAB8, 0x00000000,
+ 0xABC, 0x00000000,
+ 0xAC0, 0x00000000,
+ 0xAC4, 0x00000000,
+ 0xAC8, 0x00000000,
+ 0xACC, 0x00000000,
+ 0xAD0, 0x00000000,
+ 0xAD4, 0x00000000,
+ 0xAD8, 0x00000000,
+ 0xADC, 0x00000000,
+ 0xAE0, 0x00000000,
+ 0xAE4, 0x00000000,
+ 0xAE8, 0x00000000,
+ 0xAEC, 0x00000000,
+ 0xAF0, 0x00000000,
+ 0xAF4, 0x00000000,
+ 0xAF8, 0x00000000,
+ 0xB00, 0x00000000,
+ 0xB04, 0x00000000,
+ 0xB08, 0x00000000,
+ 0xB0C, 0x00000000,
+ 0xB10, 0x00000000,
+ 0xB14, 0x00000000,
+ 0xB18, 0x00000000,
+ 0xB1C, 0x00000000,
+ 0xB20, 0x00000000,
+ 0xB24, 0x00000000,
+ 0xB28, 0x00000000,
+ 0xB2C, 0x00000000,
+ 0xB30, 0x00000000,
+ 0xB34, 0x00000000,
+ 0xB38, 0x00000000,
+ 0xB3C, 0x00000000,
+ 0xB40, 0x00000000,
+ 0xB44, 0x00000000,
+ 0xB48, 0x00000000,
+ 0xB4C, 0x00000000,
+ 0xB50, 0x00000000,
+ 0xB54, 0x00000000,
+ 0xB58, 0x00060100,
+ 0xB5C, 0x00000000,
+ 0xB60, 0x00000000,
+ 0xB64, 0x00000000,
+ 0xB68, 0x00000000,
+ 0xB6C, 0x00000000,
+ 0xB70, 0x00000000,
+ 0xB74, 0x00000000,
+ 0xB78, 0x00000000,
+ 0xB7C, 0x00000000,
+ 0xB80, 0x00000000,
+ 0xB84, 0x00000000,
+ 0xB88, 0x00000000,
+ 0xB8C, 0x00000000,
+ 0xB90, 0x00000000,
+ 0xB94, 0x00000000,
+ 0xB98, 0x00000000,
+ 0xB9C, 0x00000000,
+ 0xBA0, 0x00000000,
+ 0xBA4, 0x00000000,
+ 0xBA8, 0x00000000,
+ 0xBAC, 0x00000000,
+ 0xBB0, 0x00000000,
+ 0xBB4, 0x00000000,
+ 0xBB8, 0x00000000,
+ 0xBBC, 0x00000000,
+ 0xBC0, 0x00000000,
+ 0xBC4, 0x00000000,
+ 0xBC8, 0x00000000,
+ 0xBCC, 0x00000000,
+ 0xBD0, 0x00000000,
+ 0xBD4, 0x00000000,
+ 0xBD8, 0x00000000,
+ 0xBDC, 0x00000000,
+ 0xBE0, 0x00000000,
+ 0xBE4, 0x00000000,
+ 0xBE8, 0x00000000,
+ 0xBEC, 0x00000000,
+ 0xBF0, 0x00000000,
+ 0xBF4, 0x00000000,
+ 0xBF8, 0x00000000,
+ 0xC00, 0x1C8BA0D6,
+ 0xC04, 0x00000001,
+ 0xC08, 0x00000000,
+ 0xC0C, 0x02F1D8B7,
+ 0xC10, 0x000000B0,
+ 0xC14, 0x0000D891,
+ 0xC18, 0x00087672,
+ 0xC1C, 0x15260000,
+ 0xC20, 0x00000000,
+ 0xC24, 0x40600000,
+ 0xC28, 0x06400F76,
+ 0xC2C, 0xE30020E1,
+ 0xC30, 0x140C9494,
+ 0xC34, 0x00A04946,
+ 0xC38, 0x011D4820,
+ 0xC3C, 0x168DB61B,
+ 0xC40, 0x009C50F8,
+ 0xC44, 0x2013BAD1,
+ 0xC48, 0xFFFFF7CC,
+ 0xC4C, 0xA000FFFF,
+ 0xC50, 0x20D0F800,
+ 0xC54, 0x941A0200,
+ 0xC58, 0x18380111,
+ 0xC5C, 0x006E01B8,
+ 0xC60, 0x2CA5555B,
+ 0xC64, 0x0210005F,
+ 0xC68, 0x039A5300,
+ 0xC6C, 0x0265C2BA,
+ 0xC70, 0x000CEB21,
+ 0xC74, 0x0E149CA1,
+ 0xC78, 0x1AB4956B,
+ 0xC7C, 0x00000ABF,
+ 0xC80, 0xC02A8799,
+ 0xC84, 0x06C636C6,
+ 0xC88, 0x08090202,
+ 0xC8C, 0x00204048,
+ 0xC90, 0x00F85F85,
+ 0xC94, 0x00000F85,
+ 0xC98, 0x58385858,
+ 0xC9C, 0x18382838,
+ 0xCA0, 0x00002838,
+ 0xCA4, 0x3A253A3A,
+ 0xCA8, 0x10251A25,
+ 0xCAC, 0x00001025,
+ 0xCB0, 0x3A133A3A,
+ 0xCB4, 0x08130D13,
+ 0xCB8, 0x00000813,
+ 0xCBC, 0x001F1066,
+ 0xCC0, 0x88A00400,
+ 0xCC4, 0x00200400,
+ 0xCC8, 0x0B200400,
+ 0xCCC, 0x00600400,
+ 0xCD0, 0x00000092,
+ 0xCD4, 0x22220000,
+ 0xCD8, 0x22222222,
+ 0xCDC, 0x22222222,
+ 0xCE0, 0x22222222,
+ 0xCE4, 0x22222222,
+ 0xCE8, 0x00002222,
+ 0xCEC, 0x00000000,
+ 0xCF0, 0x00000000,
+ 0xCF4, 0x00000000,
+ 0xCF8, 0x00000000,
+ 0xD00, 0x1083A10A,
+ 0xD04, 0x0EC42948,
+ 0xD08, 0x10852108,
+ 0xD0C, 0x0CC41D08,
+ 0xD10, 0x108620EC,
+ 0xD14, 0x0CA42108,
+ 0xD18, 0x107620E8,
+ 0xD1C, 0x0E742108,
+ 0xD20, 0x0E8618C8,
+ 0xD24, 0x00000108,
+ 0xD28, 0x288C224C,
+ 0xD2C, 0x11C6320C,
+ 0xD30, 0x30CEBD98,
+ 0xD34, 0x10C31908,
+ 0xD38, 0x310A318C,
+ 0xD3C, 0x18C41D08,
+ 0xD40, 0x28CC4190,
+ 0xD44, 0x19062108,
+ 0xD48, 0x294A5A17,
+ 0xD4C, 0x00000108,
+ 0xD50, 0x10A3A908,
+ 0xD54, 0x10842148,
+ 0xD58, 0x14C5314A,
+ 0xD5C, 0x1086258C,
+ 0xD60, 0x10A42948,
+ 0xD64, 0x10842108,
+ 0xD68, 0x08C42108,
+ 0xD6C, 0x10842148,
+ 0xD70, 0x08822084,
+ 0xD74, 0x10841D04,
+ 0xD78, 0x08421088,
+ 0xD7C, 0x1083A104,
+ 0xD80, 0x10842108,
+ 0xD84, 0x1085294A,
+ 0xD88, 0x08822104,
+ 0xD8C, 0x10852948,
+ 0xD90, 0x08421084,
+ 0xD94, 0x10852104,
+ 0xD98, 0x08421084,
+ 0xD9C, 0x10863184,
+ 0xDA0, 0x1083B10A,
+ 0xDA4, 0x10842148,
+ 0xDA8, 0x1984718C,
+ 0xDAC, 0x108C33AF,
+ 0xDB0, 0x00000000,
+ 0xDB4, 0x00000000,
+ 0xDB8, 0x00000000,
+ 0xDBC, 0x00000000,
+ 0xDC0, 0x00000000,
+ 0xDC4, 0x00000000,
+ 0xDC8, 0x00000000,
+ 0xDCC, 0x00000000,
+ 0xDD0, 0x00000000,
+ 0xDD4, 0x00000000,
+ 0xDD8, 0x00000000,
+ 0xDDC, 0x00000000,
+ 0xDE0, 0x00000000,
+ 0xDE4, 0x00000000,
+ 0xDE8, 0x00000000,
+ 0xDEC, 0x00000000,
+ 0xDF0, 0x00000000,
+ 0xDF4, 0x00000000,
+ 0xDF8, 0x00000000,
+ 0x1800, 0x00033312,
+ 0x1804, 0x00033312,
+ 0x180C, 0x17F40060,
+ 0x1810, 0x62F508C4,
+ 0x1814, 0x506AA5B4,
+ 0x1818, 0x000014FF,
+ 0x181C, 0x00000000,
+ 0x1820, 0x02D508CC,
+ 0x1824, 0x506AA5B4,
+ 0x1828, 0x000004FD,
+ 0x182C, 0x00000000,
+ 0x1834, 0x00000000,
+ 0x1838, 0x20000000,
+ 0x183C, 0x00000000,
+ 0x1840, 0x00000000,
+ 0x1844, 0x00000000,
+ 0x1848, 0x00000000,
+ 0x184C, 0x00000000,
+ 0x1850, 0x00000000,
+ 0x1854, 0x00000000,
+ 0x1858, 0x00000000,
+ 0x185C, 0x00000000,
+ 0x1860, 0xF0040FF8,
+ 0x1864, 0x7F000000,
+ 0x1868, 0x00000000,
+ 0x186C, 0x0000FF00,
+ 0x1870, 0x00000000,
+ 0x1874, 0x00000000,
+ 0x1878, 0x00000000,
+ 0x187C, 0x00000000,
+ 0x1880, 0x00000000,
+ 0x1884, 0x02B00000,
+ 0x1888, 0x00000000,
+ 0x188C, 0x00000000,
+ 0x1890, 0x00000000,
+ 0x1894, 0x00000000,
+ 0x1898, 0x00000000,
+ 0x18A0, 0x00510000,
+ 0x18A4, 0x183C1F7F,
+ 0x18A8, 0x0A02C99A,
+ 0x18AC, 0x00004200,
+ 0x18B0, 0x0809FB08,
+ 0x18B0, 0x0809FB09,
+ 0x18B4, 0x00000000,
+ 0x18B8, 0x00000000,
+ 0x18BC, 0x00C3FF80,
+ 0x18C0, 0x0002D100,
+ 0x18C4, 0x00000004,
+ 0x18C8, 0x001FFFE0,
+ 0x18CC, 0x0809FB08,
+ 0x18CC, 0x0809FB09,
+ 0x18D0, 0x00000000,
+ 0x18D4, 0x00000000,
+ 0x18D8, 0x00C3FF80,
+ 0x18DC, 0x0002D100,
+ 0x18E0, 0x00000004,
+ 0x18E4, 0x001FFFE0,
+ 0x18E8, 0x00800000,
+ 0x18EC, 0x1EC08000,
+ 0x18F0, 0x7F000064,
+ 0x18F4, 0x1F7DE75C,
+ 0x18F8, 0x7F7F7F7F,
+ 0x18FC, 0x7F7F7F7F,
+ 0x1900, 0xA7A7A7A7,
+ 0x1904, 0x95959595,
+ 0x1908, 0x00777788,
+ 0x190C, 0x77776666,
+ 0x1910, 0x00033333,
+ 0x1914, 0xAAAC875A,
+ 0x1918, 0x2AA2A8A2,
+ 0x191C, 0x2AAAA8A2,
+ 0x1920, 0x00878766,
+ 0x1924, 0x000C4924,
+ 0x1928, 0x5669B6C0,
+ 0x192C, 0x00409190,
+ 0x1930, 0xB85C0492,
+ 0x1934, 0x00B4A298,
+ 0x1938, 0x00030151,
+ 0x193C, 0x0058C618,
+ 0x1940, 0x41000000,
+ 0x1944, 0x00000BCB,
+ 0x1948, 0xAAAAAAAA,
+ 0x194C, 0x00B99999,
+ 0x1950, 0x88886665,
+ 0x1954, 0x08888888,
+ 0x1958, 0x00000618,
+ 0x195C, 0x00000000,
+ 0x1960, 0x00000000,
+ 0x1964, 0x00000000,
+ 0x1968, 0x00000000,
+ 0x196C, 0x00000000,
+ 0x1970, 0x00000000,
+ 0x1974, 0x00000000,
+ 0x1978, 0x00000000,
+ 0x197C, 0x00000000,
+ 0x1980, 0x00000000,
+ 0x1984, 0x00000000,
+ 0x1988, 0x00000000,
+ 0x198C, 0x00000000,
+ 0x1990, 0x00000000,
+ 0x1994, 0x00000000,
+ 0x1998, 0x00000000,
+ 0x199C, 0x00000000,
+ 0x19A0, 0x00000000,
+ 0x19A4, 0x00000000,
+ 0x19A8, 0x00000000,
+ 0x19AC, 0x00000000,
+ 0x19B0, 0x00000000,
+ 0x19B4, 0x00000000,
+ 0x19B8, 0x00000000,
+ 0x19BC, 0x00000000,
+ 0x19C0, 0x00000000,
+ 0x19C4, 0x00000000,
+ 0x19C8, 0x00000000,
+ 0x19CC, 0x00000000,
+ 0x19D0, 0x00000000,
+ 0x19D4, 0x00000000,
+ 0x19D8, 0x00000000,
+ 0x19DC, 0x00000000,
+ 0x19E0, 0x00000000,
+ 0x19E4, 0x00000000,
+ 0x19E8, 0x00000000,
+ 0x19EC, 0x00000000,
+ 0x19F0, 0x00000000,
+ 0x19F4, 0x00000000,
+ 0x19F8, 0x00000000,
+ 0x1C00, 0x00000000,
+ 0x1C04, 0x00000000,
+ 0x1C08, 0x00000000,
+ 0x1C0C, 0x00000000,
+ 0x1C10, 0x00000000,
+ 0x1C14, 0x00000000,
+ 0x1C18, 0x00000000,
+ 0x1C1C, 0x00000000,
+ 0x1C20, 0x03C23F00,
+ 0x1C24, 0xF101F002,
+ 0x1C28, 0x0FFE0010,
+ 0x1C2C, 0x453090FF,
+ 0x1C30, 0xFE0090FE,
+ 0x1C34, 0xE4E42000,
+ 0x1C38, 0xFFA1005E,
+ 0x1C40, 0x8F588837,
+ 0x1C44, 0x04400300,
+ 0x1C48, 0x00000000,
+ 0x1C4C, 0x00000200,
+ 0x1C50, 0x8E588837,
+ 0x1C54, 0x04400300,
+ 0x1C58, 0x00000000,
+ 0x1C5C, 0xFFFFFFFF,
+ 0x1C60, 0x0F030032,
+ 0x1C64, 0x360F0000,
+ 0x1C68, 0x007F0000,
+ 0x1C6C, 0x00010000,
+ 0x1C70, 0x00037FFE,
+ 0x1C74, 0x00000000,
+ 0x1C78, 0x00020000,
+ 0x1C7C, 0x00310000,
+ 0x1C80, 0x0E38E000,
+ 0x1C84, 0x245120D4,
+ 0x1C88, 0xC8400483,
+ 0x1C8C, 0x40005A20,
+ 0x1C94, 0x00000000,
+ 0x1C98, 0x00000000,
+ 0x1C9C, 0x00000000,
+ 0x1CA0, 0x00000000,
+ 0x1CA4, 0x20000000,
+ 0x1CA8, 0x0E000000,
+ 0x1CAC, 0xE424A2CC,
+ 0x1CB0, 0x00000000,
+ 0x1CB4, 0x00000000,
+ 0x1CB8, 0x24800000,
+ 0x1CBC, 0x60004800,
+ 0x1CC0, 0x24800000,
+ 0x1CC4, 0x60004800,
+ 0x1CC8, 0xF0444900,
+ 0x1CCC, 0x030300F1,
+ 0x1CD0, 0x0F000000,
+ 0x1CD4, 0x02024B00,
+ 0x1CD8, 0x04000000,
+ 0x1CDC, 0x10000000,
+ 0x1CE0, 0x60000000,
+ 0x1CE4, 0x00000000,
+ 0x1CE8, 0xC0000000,
+ 0x1CEC, 0x00000000,
+ 0x1CF0, 0x00000000,
+ 0x1CF4, 0xE4000000,
+ 0x1CF8, 0x00000000,
+ 0x1D00, 0x00000000,
+ 0x1D04, 0x08A3C000,
+ 0x1D08, 0xA0000000,
+ 0x1D10, 0x08B5BBBB,
+ 0x1D14, 0x77777777,
+ 0x1D18, 0x99999999,
+ 0x1D1C, 0x99999999,
+ 0x1D20, 0x000081E0,
+ 0x1D24, 0x00000000,
+ 0x1D28, 0x00000000,
+ 0x1D2C, 0xC0000000,
+ 0x1D30, 0x50009C00,
+ 0x1D34, 0x00000000,
+ 0x1D38, 0x00000000,
+ 0x1D3C, 0xF8000000,
+ 0x1D40, 0x00000000,
+ 0x1D44, 0x74740000,
+ 0x1D48, 0x14147474,
+ 0x1D4C, 0x00FFFF14,
+ 0x1D50, 0x00000000,
+ 0x1D54, 0x03A00000,
+ 0x1D58, 0x80800000,
+ 0x1D5C, 0x00000000,
+ 0x1D60, 0x00000000,
+ 0x1D64, 0x88000000,
+ 0x1D68, 0x00000000,
+ 0x1D6C, 0x666D8001,
+ 0x1D70, 0x20202020,
+ 0x1D74, 0x4E4E4E4E,
+ 0x1D78, 0x18189818,
+ 0x1D7C, 0x0005A000,
+ 0x1D80, 0x00080000,
+ 0x1D84, 0x00080000,
+ 0x1D88, 0x000000EF,
+ 0x1D8C, 0x0C0C0C0C,
+ 0x1D90, 0x103F003F,
+ 0x1D94, 0x00000000,
+ 0x1D98, 0x00000000,
+ 0x1D9C, 0x00000000,
+ 0x1DA0, 0x00000000,
+ 0x1DA4, 0x00000000,
+ 0x1DA8, 0x00000000,
+ 0x1DAC, 0x00000000,
+ 0x1DB0, 0x00000000,
+ 0x1DB4, 0x00000000,
+ 0x1DB8, 0x00000000,
+ 0x1DBC, 0x00000000,
+ 0x1DC0, 0x00000000,
+ 0x1DC4, 0x00000000,
+ 0x1DC8, 0x00000000,
+ 0x1DCC, 0x00000000,
+ 0x1DD0, 0x00000000,
+ 0x1DD4, 0x00000000,
+ 0x1DD8, 0x00000000,
+ 0x1DDC, 0x1FDF0000,
+ 0x1DE0, 0x01010000,
+ 0x1DE4, 0x05210123,
+ 0x1DE8, 0xFFFF4848,
+ 0x1DEC, 0x00000000,
+ 0x1DF0, 0x00000000,
+ 0x1DF4, 0x80000002,
+ 0x1DF8, 0x00000000,
+ 0x1E00, 0x00000000,
+ 0x1E04, 0x00000000,
+ 0x1E08, 0x00000000,
+ 0x1E0C, 0x00000000,
+ 0x1E10, 0x00000000,
+ 0x1E14, 0x00000000,
+ 0x1E18, 0x00000000,
+ 0x1E1C, 0x00000000,
+ 0x1E20, 0x00000000,
+ 0x1E24, 0x80003000,
+ 0x1E28, 0x000CC0C3,
+ 0x1E2C, 0xE4E40404,
+ 0x1E30, 0xE4E4E4E4,
+ 0x1E34, 0xF3001234,
+ 0x1E38, 0x00000000,
+ 0x1E3C, 0x00000000,
+ 0x1E40, 0x00000000,
+ 0x1E44, 0x00000000,
+ 0x1E48, 0x00000000,
+ 0x1E4C, 0x00000000,
+ 0x1E50, 0x00000000,
+ 0x1E54, 0x00000000,
+ 0x1E58, 0x00000000,
+ 0x1E5C, 0xC1000000,
+ 0x1E60, 0x00000000,
+ 0x1E64, 0xF3A00001,
+ 0x1E68, 0x0028846E,
+ 0x1E6C, 0x40274906,
+ 0x1E70, 0x00001000,
+ 0x1E74, 0x00000000,
+ 0x1E78, 0x00000000,
+ 0x1E7C, 0x00000000,
+ 0x1E80, 0x00000000,
+ 0x1E84, 0x00000000,
+ 0x1E84, 0x40000000,
+ 0x1E84, 0x41000000,
+ 0x1E84, 0x42000000,
+ 0x1E84, 0x43000000,
+ 0x1E84, 0x44000000,
+ 0x1E84, 0x45000000,
+ 0x1E84, 0x46000000,
+ 0x1E84, 0x47000000,
+ 0x1E84, 0x48000000,
+ 0x1E84, 0x49000000,
+ 0x1E84, 0x4A000000,
+ 0x1E84, 0x4B000000,
+ 0x1E84, 0x4C000000,
+ 0x1E84, 0x4D000000,
+ 0x1E84, 0x4E000000,
+ 0x1E84, 0x4F000000,
+ 0x1E84, 0x50000000,
+ 0x1E84, 0x51000000,
+ 0x1E84, 0x52000000,
+ 0x1E84, 0x53000000,
+ 0x1E84, 0x54000000,
+ 0x1E84, 0x55000000,
+ 0x1E84, 0x56000000,
+ 0x1E84, 0x57000000,
+ 0x1E84, 0x58000000,
+ 0x1E84, 0x59000000,
+ 0x1E84, 0x5A000000,
+ 0x1E84, 0x5B000000,
+ 0x1E84, 0x5C000000,
+ 0x1E84, 0x5D000000,
+ 0x1E84, 0x5E000000,
+ 0x1E84, 0x5F000000,
+ 0x1E84, 0x60000000,
+ 0x1E84, 0x61000000,
+ 0x1E84, 0x62000000,
+ 0x1E84, 0x63000000,
+ 0x1E84, 0x64000000,
+ 0x1E84, 0x65000000,
+ 0x1E84, 0x66000000,
+ 0x1E84, 0x67000000,
+ 0x1E84, 0x68000000,
+ 0x1E84, 0x69000000,
+ 0x1E84, 0x6A000000,
+ 0x1E84, 0x6B000000,
+ 0x1E84, 0x6C000000,
+ 0x1E84, 0x6D000000,
+ 0x1E84, 0x6E000000,
+ 0x1E84, 0x6F000000,
+ 0x1E84, 0x70000000,
+ 0x1E84, 0x71000000,
+ 0x1E84, 0x72000000,
+ 0x1E84, 0x73000000,
+ 0x1E84, 0x74000000,
+ 0x1E84, 0x75000000,
+ 0x1E84, 0x76000000,
+ 0x1E84, 0x77000000,
+ 0x1E84, 0x78000000,
+ 0x1E84, 0x79000000,
+ 0x1E84, 0x7A000000,
+ 0x1E84, 0x7B000000,
+ 0x1E84, 0x7C000000,
+ 0x1E84, 0x7D000000,
+ 0x1E84, 0x7E000000,
+ 0x1E84, 0x7F000000,
+ 0x1E84, 0x80000000,
+ 0x1E84, 0x00000000,
+ 0x1E88, 0x0200FC1C,
+ 0x1E8C, 0x00000000,
+ 0x1E90, 0x00000000,
+ 0x1E94, 0x04000000,
+ 0x1E98, 0x00000000,
+ 0x1E9C, 0x00000000,
+ 0x1EA0, 0x00000000,
+ 0x1EA4, 0x00000000,
+ 0x1EA8, 0xAA464646,
+ 0x1EAC, 0x01800030,
+ 0x1EB0, 0x00003002,
+ 0x1EB4, 0x31800002,
+ 0x1EB8, 0x00000000,
+ 0x1EBC, 0x00000000,
+ 0x1EC0, 0x00000000,
+ 0x1EC4, 0x00000000,
+ 0x1EC8, 0x00000000,
+ 0x1ECC, 0x00000000,
+ 0x1ED0, 0x00000000,
+ 0x1ED4, 0x8000000A,
+ 0x1ED8, 0x800B03E8,
+ 0x1EDC, 0x83E90FFF,
+ 0x1EE0, 0x8000FFFF,
+ 0x1EE4, 0x70000000,
+ 0x1EE8, 0x00000000,
+ 0x1EEC, 0x0280A933,
+ 0x1EF0, 0x00000A80,
+ 0x1EF4, 0x00001266,
+ 0x1EF8, 0x01000100,
+ 0x3A00, 0x0004080C,
+ 0x3A04, 0x1C202428,
+ 0x3A08, 0x0C101418,
+ 0x3A0C, 0x181C2024,
+ 0x3A10, 0x080C1014,
+ 0x3A14, 0x181C2024,
+ 0x3A18, 0x080C1014,
+ 0x3A1C, 0x00000000,
+ 0x3A20, 0x00000000,
+ 0x3A24, 0x00000000,
+ 0x3A28, 0x00000000,
+ 0x3A2C, 0x181C2024,
+ 0x3A30, 0x080C1014,
+ 0x3A34, 0x20240004,
+ 0x3A38, 0x1014181C,
+ 0x3A3C, 0x0004080C,
+ 0x3A40, 0x00000000,
+ 0x3A44, 0x00000000,
+ 0x3A48, 0x00000000,
+ 0x3A4C, 0x00000000,
+ 0x3A50, 0x00000000,
+ 0x3A54, 0x00000000,
+ 0x3A58, 0x00000000,
+ 0x3A5C, 0x00000000,
+ 0x3A60, 0x00000000,
+ 0x3A64, 0x00000000,
+ 0x3A68, 0x00000000,
+ 0x3A6C, 0x00000000,
+ 0x3A70, 0x00000000,
+ 0x3A74, 0x00000000,
+ 0x3A78, 0x00000000,
+ 0x3A7C, 0x00000000,
+ 0x3A80, 0x00000000,
+ 0x3A84, 0x00000000,
+ 0x3A88, 0x00000000,
+ 0x3A8C, 0x00000000,
+ 0x3A90, 0x00000000,
+ 0x3A94, 0x00000000,
+ 0x3A98, 0x00000000,
+ 0x3A9C, 0x00000000,
+ 0x3AA0, 0x00000000,
+ 0x3AA4, 0x00000000,
+ 0x4000, 0xA6A6A6A6,
+ 0x4004, 0x95959595,
+ 0x4008, 0x00777777,
+ 0x400C, 0x77776666,
+ 0x4010, 0x00033333,
+ 0x4014, 0xAAAC875A,
+ 0x4018, 0x2AA2A8A2,
+ 0x401C, 0x2AAAA8A2,
+ 0x4020, 0x00878766,
+ 0x4024, 0x000C4924,
+ 0x4028, 0x5669B6C0,
+ 0x402C, 0x00409190,
+ 0x4030, 0xB85C0492,
+ 0x4034, 0x00B4A298,
+ 0x4038, 0x00030151,
+ 0x403C, 0x0058C618,
+ 0x4040, 0x41000000,
+ 0x4044, 0x00000BCB,
+ 0x4048, 0xAAAAAAAA,
+ 0x404C, 0x00B98989,
+ 0x4050, 0x88886665,
+ 0x4054, 0x08888888,
+ 0x4058, 0x00000618,
+ 0x405C, 0x00000000,
+ 0x4060, 0x00000000,
+ 0x4064, 0x00000000,
+ 0x4068, 0x00000000,
+ 0x406C, 0x00000000,
+ 0x4070, 0x00000000,
+ 0x4074, 0x00000000,
+ 0x4078, 0x00000000,
+ 0x407C, 0x00000000,
+ 0x4080, 0x00000000,
+ 0x4084, 0x00000000,
+ 0x4088, 0x00000000,
+ 0x408C, 0x00000000,
+ 0x4090, 0x00000000,
+ 0x4094, 0x00000000,
+ 0x4098, 0x00000000,
+ 0x409C, 0x00000000,
+ 0x40A0, 0x00000000,
+ 0x40A4, 0x00000000,
+ 0x40A8, 0x00000000,
+ 0x40AC, 0x00000000,
+ 0x40B0, 0x00000000,
+ 0x40B4, 0x00000000,
+ 0x40B8, 0x00000000,
+ 0x40BC, 0x00000000,
+ 0x40C0, 0x00000000,
+ 0x40C4, 0x00000000,
+ 0x40C8, 0x00000000,
+ 0x40CC, 0x00000000,
+ 0x40D0, 0x00000000,
+ 0x40D4, 0x00000000,
+ 0x40D8, 0x00000000,
+ 0x40DC, 0x00000000,
+ 0x40E0, 0x00000000,
+ 0x40E4, 0x00000000,
+ 0x40E8, 0x00000000,
+ 0x40EC, 0x00000000,
+ 0x40F0, 0x00000000,
+ 0x40F4, 0x00000000,
+ 0x40F8, 0x00000000,
+ 0x4100, 0x00033312,
+ 0x4104, 0x00033312,
+ 0x410C, 0x17F40060,
+ 0x4110, 0x62D508C4,
+ 0x4114, 0x506AA5B4,
+ 0x4118, 0x000014FF,
+ 0x411C, 0x00000000,
+ 0x4120, 0x02D508CC,
+ 0x4124, 0x506AA5B4,
+ 0x4128, 0x000004FD,
+ 0x412C, 0x00000000,
+ 0x4134, 0x00000000,
+ 0x4138, 0x20000000,
+ 0x413C, 0x00000000,
+ 0x4140, 0x00000000,
+ 0x4144, 0x00000000,
+ 0x4148, 0x00000000,
+ 0x414C, 0x00000000,
+ 0x4150, 0x00000000,
+ 0x4154, 0x00000000,
+ 0x4158, 0x00000000,
+ 0x415C, 0x00000000,
+ 0x4160, 0xF0040FF8,
+ 0x4164, 0x7F000000,
+ 0x4168, 0x00000000,
+ 0x416C, 0x00008000,
+ 0x4170, 0x00000000,
+ 0x4174, 0x00000000,
+ 0x4178, 0x00000000,
+ 0x417C, 0x00000000,
+ 0x4180, 0x00000000,
+ 0x4184, 0x02B00000,
+ 0x4188, 0x00000000,
+ 0x418C, 0x00000000,
+ 0x4190, 0x00000000,
+ 0x4194, 0x00000000,
+ 0x4198, 0x00000000,
+ 0x41A0, 0x00510000,
+ 0x41A4, 0x183C1F7F,
+ 0x41A8, 0x1402C99A,
+ 0x41AC, 0x00004200,
+ 0x41B0, 0x0809FB08,
+ 0x41B0, 0x0809FB09,
+ 0x41B4, 0x00000000,
+ 0x41B8, 0x00000000,
+ 0x41BC, 0x00C3FF80,
+ 0x41C0, 0x0002D100,
+ 0x41C4, 0x00000004,
+ 0x41C8, 0x001FFFE0,
+ 0x41CC, 0x0809FB08,
+ 0x41CC, 0x0809FB09,
+ 0x41D0, 0x00000000,
+ 0x41D4, 0x00000000,
+ 0x41D8, 0x00C3FF80,
+ 0x41DC, 0x0002D100,
+ 0x41E0, 0x00000004,
+ 0x41E4, 0x001FFFE0,
+ 0x41E8, 0x00000200,
+ 0x41EC, 0x1E008000,
+ 0x41F0, 0x7F000064,
+ 0x41F4, 0x1F7DE75C,
+ 0x41F8, 0x7F7F7F7F,
+ 0x41FC, 0x7F7F7F7F,
+ 0x1830, 0x700B8001,
+ 0x1830, 0x700B8001,
+ 0x1830, 0x70144001,
+ 0x1830, 0x70244001,
+ 0x1830, 0x70344001,
+ 0x1830, 0x70444001,
+ 0x1830, 0x705B8001,
+ 0x1830, 0x70644001,
+ 0x1830, 0x707B8001,
+ 0x1830, 0x708B8001,
+ 0x1830, 0x709B8001,
+ 0x1830, 0x70AB8001,
+ 0x1830, 0x70BB8001,
+ 0x1830, 0x70CB8001,
+ 0x1830, 0x70DB8001,
+ 0x1830, 0x70EB8001,
+ 0x1830, 0x70FB8001,
+ 0x1830, 0x70FB8001,
+ 0x4130, 0x700B8001,
+ 0x4130, 0x700B8001,
+ 0x4130, 0x70144001,
+ 0x4130, 0x70244001,
+ 0x4130, 0x70344001,
+ 0x4130, 0x70444001,
+ 0x4130, 0x705B8001,
+ 0x4130, 0x70644001,
+ 0x4130, 0x707B8001,
+ 0x4130, 0x708B8001,
+ 0x4130, 0x709B8001,
+ 0x4130, 0x70AB8001,
+ 0x4130, 0x70BB8001,
+ 0x4130, 0x70CB8001,
+ 0x4130, 0x70DB8001,
+ 0x4130, 0x70EB8001,
+ 0x4130, 0x70FB8001,
+ 0x4130, 0x70FB8001,
+ 0x1A00, 0x00D047C8,
+ 0x1A04, 0xC0000008,
+ 0x1A08, 0x88838300,
+ 0x1A0C, 0x2E20100F,
+ 0x1A10, 0x9500BB78,
+ 0x1A14, 0x111440A8,
+ 0x1A18, 0x00881117,
+ 0x1A1C, 0x89140F00,
+ 0x1A20, 0x52840000,
+ 0x1A24, 0x3E18FEC8,
+ 0x1A28, 0x00150A88,
+ 0x1A2C, 0x12988000,
+ 0x1A30, 0x10114007,
+ 0x1A34, 0x1011C007,
+ 0x1A38, 0x00000000,
+ 0x1A3C, 0x00000000,
+ 0x1A40, 0x00000000,
+ 0x1A44, 0x00000000,
+ 0x1A48, 0x000C0000,
+ 0x1A4C, 0xB00000C0,
+ 0x1A50, 0x22040700,
+ 0x1A54, 0x09003000,
+ 0x1A58, 0x00000881,
+ 0x1A5C, 0x00000128,
+ 0x1A60, 0x85830000,
+ 0x1A64, 0x00000128,
+ 0x1A68, 0x00222211,
+ 0x1A6C, 0x00000000,
+ 0x1A70, 0x00008000,
+ 0x1A74, 0x00000048,
+ 0x1A78, 0x000089F0,
+ 0x1A7C, 0x225B0606,
+ 0x1A80, 0x208A7532,
+ 0x1A84, 0x85200200,
+ 0x1A88, 0x048C0000,
+ 0x1A8C, 0x00000000,
+ 0x1A90, 0x00000000,
+ 0x1A94, 0x00000000,
+ 0x1A98, 0xACC4C040,
+ 0x1A9C, 0x0016C8B2,
+ 0x1AA0, 0x00FAF0DE,
+ 0x1AA4, 0x00020000,
+ 0x1AA8, 0xBA0F0004,
+ 0x1AAC, 0x00122344,
+ 0x1AB0, 0x0FFFFFFF,
+ 0x1AB4, 0x0F201402,
+ 0x1AB8, 0x00000000,
+ 0x1ABC, 0xC2008080,
+ 0x1AC0, 0x54D0A742,
+ 0x1AC4, 0x00000000,
+ 0x1AC8, 0x00000807,
+ 0x1ACC, 0x00000707,
+ 0x1AD0, 0xA33529AD,
+ 0x1AD4, 0x0D8D8452,
+ 0x1AD8, 0x08024024,
+ 0x1ADC, 0x000DB001,
+ 0x1AE0, 0x00600391,
+ 0x1AE4, 0x08000080,
+ 0x1AE8, 0x00000002,
+ 0x1AEC, 0x00000000,
+ 0x1AF0, 0x00000000,
+ 0x1AF4, 0x00000000,
+ 0x1AF8, 0x00000000,
+ 0x1AFC, 0x00000000,
+ 0x1D0C, 0x00400000,
+ 0x1D0C, 0x00410000,
+ 0x1EE8, 0x00000003,
+ 0xC0C, 0x02F1D8BF,
+ 0x1D94, 0x40000000,
+ 0x1D94, 0x40010000,
+ 0x1D94, 0x40020000,
+ 0x1D94, 0x40030000,
+ 0x1D94, 0x40040000,
+ 0x1D94, 0x40050000,
+ 0x1D94, 0x40060000,
+ 0x1D94, 0x40070000,
+ 0x1D94, 0x40080000,
+ 0x1D94, 0x40090000,
+ 0x1D94, 0x400A0000,
+ 0x1D94, 0x400B0000,
+ 0x1D94, 0x400C0000,
+ 0x1D94, 0x400D0000,
+ 0x1D94, 0x400E0000,
+ 0x1D94, 0x400F0000,
+ 0x1D94, 0x40100000,
+ 0x1D94, 0x40110000,
+ 0x1D94, 0x40120000,
+ 0x1D94, 0x40130000,
+ 0x1D94, 0x40140000,
+ 0x1D94, 0x40150000,
+ 0x1D94, 0x40160000,
+ 0x1D94, 0x40170000,
+ 0x1D94, 0x40180000,
+ 0x1D94, 0x40190000,
+ 0x1D94, 0x401A0000,
+ 0x1D94, 0x401B0000,
+ 0x1D94, 0x401C0000,
+ 0x1D94, 0x401D0000,
+ 0x1D94, 0x401E0000,
+ 0x1D94, 0x401F0000,
+ 0x1D94, 0x40200000,
+ 0x1D94, 0x40210000,
+ 0x1D94, 0x40220000,
+ 0x1D94, 0x40230000,
+ 0x1D94, 0x40240000,
+ 0x1D94, 0x40250000,
+ 0x1D94, 0x40260000,
+ 0x1D94, 0x40270000,
+ 0x1D94, 0x40280000,
+ 0x1D94, 0x40290000,
+ 0x1D94, 0x402A0000,
+ 0x1D94, 0x402B0000,
+ 0x1D94, 0x402C0000,
+ 0x1D94, 0x402D0000,
+ 0x1D94, 0x402E0000,
+ 0x1D94, 0x402F0000,
+ 0x1D94, 0x40300000,
+ 0x1D94, 0x40310000,
+ 0x1D94, 0x40320000,
+ 0x1D94, 0x40330000,
+ 0x1D94, 0x40340000,
+ 0x1D94, 0x40350000,
+ 0x1D94, 0x40360000,
+ 0x1D94, 0x40370000,
+ 0x1D94, 0x40380000,
+ 0x1D94, 0x40390000,
+ 0x1D94, 0x403A0000,
+ 0x1D94, 0x403B0000,
+ 0x1D94, 0x403C0000,
+ 0x1D94, 0x403D0000,
+ 0x1D94, 0x403E0000,
+ 0x1D94, 0x403F0000,
+ 0x1D94, 0x40400000,
+ 0x1D94, 0x40410000,
+ 0x1D94, 0x40420000,
+ 0x1D94, 0x40430000,
+ 0x1D94, 0x40440000,
+ 0x1D94, 0x40450000,
+ 0x1D94, 0x40460000,
+ 0x1D94, 0x40470000,
+ 0x1D94, 0x40480000,
+ 0x1D94, 0x40490000,
+ 0x1D94, 0x404A0000,
+ 0x1D94, 0x404B0000,
+ 0x1D94, 0x404C0000,
+ 0x1D94, 0x404D0000,
+ 0x1D94, 0x404E0000,
+ 0x1D94, 0x404F0000,
+ 0x1D94, 0x40500000,
+ 0x1D94, 0x40510000,
+ 0x1D94, 0x40520000,
+ 0x1D94, 0x40530000,
+ 0x1D94, 0x40540000,
+ 0x1D94, 0x40550000,
+ 0x1D94, 0x40560000,
+ 0x1D94, 0x40570000,
+ 0x1D94, 0x40580000,
+ 0x1D94, 0x40590000,
+ 0x1D94, 0x405A0000,
+ 0x1D94, 0x405B0000,
+ 0x1D94, 0x405C0000,
+ 0x1D94, 0x405D0000,
+ 0x1D94, 0x405E0000,
+ 0x1D94, 0x405F0000,
+ 0x1D94, 0x40600000,
+ 0x1D94, 0x40610000,
+ 0x1D94, 0x40620000,
+ 0x1D94, 0x40630000,
+ 0x1D94, 0x40640000,
+ 0x1D94, 0x40650000,
+ 0x1D94, 0x40660000,
+ 0x1D94, 0x40670000,
+ 0x1D94, 0x40680000,
+ 0x1D94, 0x40690000,
+ 0x1D94, 0x406A0000,
+ 0x1D94, 0x406B0000,
+ 0x1D94, 0x406C0000,
+ 0x1D94, 0x406D0000,
+ 0x1D94, 0x406E0000,
+ 0x1D94, 0x406F0000,
+ 0x1D94, 0x40700000,
+ 0x1D94, 0x40710000,
+ 0x1D94, 0x40720000,
+ 0x1D94, 0x40730000,
+ 0x1D94, 0x40740000,
+ 0x1D94, 0x40750000,
+ 0x1D94, 0x40760000,
+ 0x1D94, 0x40770000,
+ 0x1D94, 0x40780000,
+ 0x1D94, 0x40790000,
+ 0x1D94, 0x407A0000,
+ 0x1D94, 0x407B0000,
+ 0x1D94, 0x407C0000,
+ 0x1D94, 0x407D0000,
+ 0x1D94, 0x407E0000,
+ 0x1D94, 0x407F0000,
+ 0x1D94, 0x40800000,
+ 0x1D94, 0x40810000,
+ 0x1D94, 0x40820000,
+ 0x1D94, 0x40830000,
+ 0x1D94, 0x40840000,
+ 0x1D94, 0x40850000,
+ 0x1D94, 0x40860000,
+ 0x1D94, 0x40870000,
+ 0x1D94, 0x40880000,
+ 0x1D94, 0x40890000,
+ 0x1D94, 0x408A0000,
+ 0x1D94, 0x408B0000,
+ 0x1D94, 0x408C0000,
+ 0x1D94, 0x408D0000,
+ 0x1D94, 0x408E0000,
+ 0x1D94, 0x408F0000,
+ 0x1D94, 0x40900000,
+ 0x1D94, 0x40910000,
+ 0x1D94, 0x40920000,
+ 0x1D94, 0x40930000,
+ 0x1D94, 0x40940000,
+ 0x1D94, 0x40950000,
+ 0x1D94, 0x40960000,
+ 0x1D94, 0x40970000,
+ 0x1D94, 0x40980000,
+ 0x1D94, 0x40990000,
+ 0x1D94, 0x409A0000,
+ 0x1D94, 0x409B0000,
+ 0x1D94, 0x409C0000,
+ 0x1D94, 0x409D0000,
+ 0x1D94, 0x409E0000,
+ 0x1D94, 0x409F0000,
+ 0x1D94, 0x40A00000,
+ 0x1D94, 0x40A10000,
+ 0x1D94, 0x40A20000,
+ 0x1D94, 0x40A30000,
+ 0x1D94, 0x40A40000,
+ 0x1D94, 0x40A50000,
+ 0x1D94, 0x40A60000,
+ 0x1D94, 0x40A70000,
+ 0x1D94, 0x40A80000,
+ 0x1D94, 0x40A90000,
+ 0x1D94, 0x40AA0000,
+ 0x1D94, 0x40AB0000,
+ 0x1D94, 0x40AC0000,
+ 0x1D94, 0x40AD0000,
+ 0x1D94, 0x40AE0000,
+ 0x1D94, 0x40AF0000,
+ 0x1D94, 0x40B00000,
+ 0x1D94, 0x40B10000,
+ 0x1D94, 0x40B20000,
+ 0x1D94, 0x40B30000,
+ 0x1D94, 0x40B40000,
+ 0x1D94, 0x40B50000,
+ 0x1D94, 0x40B60000,
+ 0x1D94, 0x40B70000,
+ 0x1D94, 0x40B80000,
+ 0x1D94, 0x40B90000,
+ 0x1D94, 0x40BA0000,
+ 0x1D94, 0x40BB0000,
+ 0x1D94, 0x40BC0000,
+ 0x1D94, 0x40BD0000,
+ 0x1D94, 0x40BE0000,
+ 0x1D94, 0x40BF0000,
+ 0x1D94, 0x40C00000,
+ 0x1D94, 0x40C10000,
+ 0x1D94, 0x40C20000,
+ 0x1D94, 0x40C30000,
+ 0x1D94, 0x40C40000,
+ 0x1D94, 0x40C50000,
+ 0x1D94, 0x40C60000,
+ 0x1D94, 0x40C70000,
+ 0x1D94, 0x40C80000,
+ 0x1D94, 0x40C90000,
+ 0x1D94, 0x40CA0000,
+ 0x1D94, 0x40CB0000,
+ 0x1D94, 0x40CC0000,
+ 0x1D94, 0x40CD0000,
+ 0x1D94, 0x40CE0000,
+ 0x1D94, 0x40CF0000,
+ 0x1D94, 0x40D00000,
+ 0x1D94, 0x40D10000,
+ 0x1D94, 0x40D20000,
+ 0x1D94, 0x40D30000,
+ 0x1D94, 0x40D40000,
+ 0x1D94, 0x40D50000,
+ 0x1D94, 0x40D60000,
+ 0x1D94, 0x40D70000,
+ 0x1D94, 0x40D80000,
+ 0x1D94, 0x40D90000,
+ 0x1D94, 0x40DA0000,
+ 0x1D94, 0x40DB0000,
+ 0x1D94, 0x40DC0000,
+ 0x1D94, 0x40DD0000,
+ 0x1D94, 0x40DE0000,
+ 0x1D94, 0x40DF0000,
+ 0x1D94, 0x40E00000,
+ 0x1D94, 0x40E10000,
+ 0x1D94, 0x40E20000,
+ 0x1D94, 0x40E30000,
+ 0x1D94, 0x40E40000,
+ 0x1D94, 0x40E50000,
+ 0x1D94, 0x40E60000,
+ 0x1D94, 0x40E70000,
+ 0x1D94, 0x40E80000,
+ 0x1D94, 0x40E90000,
+ 0x1D94, 0x40EA0000,
+ 0x1D94, 0x40EB0000,
+ 0x1D94, 0x40EC0000,
+ 0x1D94, 0x40ED0000,
+ 0x1D94, 0x40EE0000,
+ 0x1D94, 0x40EF0000,
+ 0x1D94, 0x40F00000,
+ 0x1D94, 0x40F10000,
+ 0x1D94, 0x40F20000,
+ 0x1D94, 0x40F30000,
+ 0x1D94, 0x40F40000,
+ 0x1D94, 0x40F50000,
+ 0x1D94, 0x40F60000,
+ 0x1D94, 0x40F70000,
+ 0x1D94, 0x40F80000,
+ 0x1D94, 0x40F90000,
+ 0x1D94, 0x40FA0000,
+ 0x1D94, 0x40FB0000,
+ 0x1D94, 0x40FC0000,
+ 0x1D94, 0x40FD0000,
+ 0x1D94, 0x40FE0000,
+ 0x1D94, 0x40FF0000,
+ 0xC0C, 0x02F1D8B7,
+ 0x1EE8, 0x00000000,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_bb, rtw_phy_cfg_bb);
+
+static const u32 rtw8822c_bb_pg_type0[] = {
+ 0, 0, 0, 0x00000c20, 0xffffffff, 0x484c5054,
+ 0, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
+ 0, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
+ 0, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
+ 0, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
+ 0, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
+ 0, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
+ 0, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
+ 0, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
+ 0, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
+ 0, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
+ 0, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
+ 0, 1, 0, 0x00000e20, 0xffffffff, 0x484c5054,
+ 0, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
+ 0, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
+ 0, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
+ 0, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
+ 0, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
+ 0, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
+ 0, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
+ 0, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
+ 0, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
+ 0, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
+ 0, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044,
+ 1, 0, 0, 0x00000c24, 0xffffffff, 0x54585c60,
+ 1, 0, 0, 0x00000c28, 0xffffffff, 0x44484c50,
+ 1, 0, 0, 0x00000c2c, 0xffffffff, 0x5054585c,
+ 1, 0, 0, 0x00000c30, 0xffffffff, 0x4044484c,
+ 1, 0, 1, 0x00000c34, 0xffffffff, 0x5054585c,
+ 1, 0, 1, 0x00000c38, 0xffffffff, 0x4044484c,
+ 1, 0, 0, 0x00000c3c, 0xffffffff, 0x5054585c,
+ 1, 0, 0, 0x00000c40, 0xffffffff, 0x4044484c,
+ 1, 0, 0, 0x00000c44, 0xffffffff, 0x585c383c,
+ 1, 0, 1, 0x00000c48, 0xffffffff, 0x484c5054,
+ 1, 0, 1, 0x00000c4c, 0xffffffff, 0x383c4044,
+ 1, 1, 0, 0x00000e24, 0xffffffff, 0x54585c60,
+ 1, 1, 0, 0x00000e28, 0xffffffff, 0x44484c50,
+ 1, 1, 0, 0x00000e2c, 0xffffffff, 0x5054585c,
+ 1, 1, 0, 0x00000e30, 0xffffffff, 0x4044484c,
+ 1, 1, 1, 0x00000e34, 0xffffffff, 0x5054585c,
+ 1, 1, 1, 0x00000e38, 0xffffffff, 0x4044484c,
+ 1, 1, 0, 0x00000e3c, 0xffffffff, 0x5054585c,
+ 1, 1, 0, 0x00000e40, 0xffffffff, 0x4044484c,
+ 1, 1, 0, 0x00000e44, 0xffffffff, 0x585c383c,
+ 1, 1, 1, 0x00000e48, 0xffffffff, 0x484c5054,
+ 1, 1, 1, 0x00000e4c, 0xffffffff, 0x383c4044
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8822c_bb_pg_type0);
+
+static const u32 rtw8822c_rf_a[] = {
+ 0x000, 0x00030000,
+ 0x018, 0x00013124,
+ 0x093, 0x0008483F,
+ 0x0DE, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xA0000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xB0000000, 0x00000000,
+ 0x081, 0x0000FC01,
+ 0x081, 0x0002FC01,
+ 0x081, 0x0003FC01,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0xA0000000, 0x00000000,
+ 0x085, 0x0006A06C,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000033,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000043,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000053,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x08A, 0x000E7DE3,
+ 0x08B, 0x0008FE00,
+ 0x0EE, 0x00000008,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000023,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000023,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x01B, 0x00003A40,
+ 0x061, 0x0000D233,
+ 0x062, 0x0004D232,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000770,
+ 0x030, 0x00001770,
+ 0x030, 0x00002440,
+ 0x030, 0x00003440,
+ 0x030, 0x00004330,
+ 0x030, 0x00005330,
+ 0x030, 0x00008770,
+ 0x030, 0x0000A440,
+ 0x030, 0x0000C330,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x051, 0x0003C800,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0xA0000000, 0x00000000,
+ 0x052, 0x000942CA,
+ 0xB0000000, 0x00000000,
+ 0x053, 0x000090F9,
+ 0x054, 0x00088000,
+ 0x057, 0x0004C80A,
+ 0x0EF, 0x00000020,
+ 0x033, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00010E46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000010,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00030246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000018,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000028,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00031E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0000EA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00021E46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00002A46,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x05C, 0x000FCC00,
+ 0x067, 0x0000A505,
+ 0x0D3, 0x00000542,
+ 0x043, 0x00005000,
+ 0x07F, 0x00000000,
+ 0x0B0, 0x0001F0FC,
+ 0x0B1, 0x0007DBE4,
+ 0x0B2, 0x00022400,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xB0000000, 0x00000000,
+ 0x0B4, 0x00099D40,
+ 0x0B5, 0x0004103F,
+ 0x0B6, 0x000187F8,
+ 0x0B7, 0x00030018,
+ 0x0BC, 0x00000008,
+ 0x0D3, 0x00000542,
+ 0x0DD, 0x00000500,
+ 0x0BB, 0x00040010,
+ 0x0B0, 0x0001F0FA,
+ 0x0FE, 0x00000000,
+ 0x0CA, 0x00080000,
+ 0x0CA, 0x00080001,
+ 0x0FE, 0x00000000,
+ 0x0B0, 0x0001F0F8,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x0007C700,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x0001B124,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xA0000000, 0x00000000,
+ 0x0B3, 0x0007C760,
+ 0xB0000000, 0x00000000,
+ 0x018, 0x00013124,
+ 0x0CC, 0x0000F000,
+ 0x0CD, 0x00089600,
+ 0x018, 0x00013108,
+ 0x0FE, 0x00000000,
+ 0x0B8, 0x000C0440,
+ 0x0BA, 0x000E840D,
+ 0x0FE, 0x00000000,
+ 0x018, 0x00013124,
+ 0x059, 0x000A0000,
+ 0x05A, 0x00060000,
+ 0x05B, 0x00014000,
+ 0x0ED, 0x00000008,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000000F,
+ 0x0ED, 0x00000000,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000017,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000018,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001B,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001C,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x0ED, 0x00000200,
+ 0x033, 0x00000000,
+ 0x03F, 0x000F45A4,
+ 0x033, 0x00000001,
+ 0x03F, 0x000F49A4,
+ 0x033, 0x00000002,
+ 0x03F, 0x000F49A4,
+ 0x033, 0x00000003,
+ 0x03F, 0x000F69A4,
+ 0x033, 0x00000004,
+ 0x03F, 0x000F69A4,
+ 0x033, 0x00000005,
+ 0x03F, 0x000F69A4,
+ 0x033, 0x00000006,
+ 0x03F, 0x000F6DA4,
+ 0x033, 0x00000007,
+ 0x03F, 0x000F6DA4,
+ 0x033, 0x00000008,
+ 0x03F, 0x000F6DA4,
+ 0x033, 0x00000009,
+ 0x03F, 0x000F8DA4,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000F8DA4,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000F8DA4,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000F91A4,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000F91A4,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000F91A4,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000FB1A4,
+ 0x033, 0x00000010,
+ 0x03F, 0x000FB1A4,
+ 0x033, 0x00000011,
+ 0x03F, 0x000FB1A4,
+ 0x033, 0x00000012,
+ 0x03F, 0x000FB5A4,
+ 0x033, 0x00000013,
+ 0x03F, 0x000FB5A4,
+ 0x033, 0x00000014,
+ 0x03F, 0x000FD9A4,
+ 0x033, 0x00000015,
+ 0x03F, 0x000FD9A4,
+ 0x033, 0x00000016,
+ 0x03F, 0x000FF9A4,
+ 0x033, 0x00000017,
+ 0x03F, 0x000FF9A4,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FFDA4,
+ 0x033, 0x00000019,
+ 0x03F, 0x000FFDA4,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000FFDA4,
+ 0x0ED, 0x00000000,
+ 0x092, 0x00084800,
+ 0x092, 0x00084801,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x092, 0x00084800,
+ 0x08F, 0x0000182C,
+ 0x088, 0x0004326B,
+ 0x019, 0x00000005,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_a, A);
+
+static const u32 rtw8822c_rf_b[] = {
+ 0x000, 0x00030000,
+ 0x018, 0x00013124,
+ 0x093, 0x0008483F,
+ 0x0EF, 0x00080000,
+ 0x033, 0x00000001,
+ 0x03F, 0x00091020,
+ 0x0EF, 0x00000000,
+ 0x0DE, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000B9140,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xA0000000, 0x00000000,
+ 0x08E, 0x000A5540,
+ 0xB0000000, 0x00000000,
+ 0x081, 0x0000FC01,
+ 0x081, 0x0002FC01,
+ 0x081, 0x0003FC01,
+ 0x085, 0x0006A06C,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000002A,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000002A,
+ 0x0EE, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EE, 0x00000010,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000001,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000002,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000000B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00010000,
+ 0x033, 0x0000000F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000000E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000000D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000000C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000000B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000000A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000009,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000008,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000007,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000006,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000005,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000004,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000001B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000001E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000001D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000001C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000001B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000001A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000018,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000017,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000016,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000015,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000014,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000002B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000002F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000002E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000002D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000002C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000002B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000002A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000028,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000026,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000003B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000003F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000003E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000003D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000003C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000003B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000003A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000039,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000038,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000037,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000036,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000035,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000034,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000033,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000004B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000004F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000004E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000004D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000004C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000004B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000004A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000049,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000048,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000047,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000046,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000045,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000044,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000043,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773C0,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3C0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF3E8,
+ 0x033, 0x0000005B,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x0000005F,
+ 0x03F, 0x000773E8,
+ 0x033, 0x0000005E,
+ 0x03F, 0x000FF3A0,
+ 0x033, 0x0000005D,
+ 0x03F, 0x00000380,
+ 0x033, 0x0000005C,
+ 0x03F, 0x000FF380,
+ 0x033, 0x0000005B,
+ 0x03F, 0x00000300,
+ 0x033, 0x0000005A,
+ 0x03F, 0x000002A8,
+ 0x033, 0x00000059,
+ 0x03F, 0x00000280,
+ 0x033, 0x00000058,
+ 0x03F, 0x000FF280,
+ 0x033, 0x00000057,
+ 0x03F, 0x00000200,
+ 0x033, 0x00000056,
+ 0x03F, 0x000001C0,
+ 0x033, 0x00000055,
+ 0x03F, 0x00000180,
+ 0x033, 0x00000054,
+ 0x03F, 0x00000040,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000053,
+ 0x03F, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x08A, 0x000E7DE3,
+ 0x08B, 0x0008FE00,
+ 0x0EE, 0x00000008,
+ 0x033, 0x00000000,
+ 0x03F, 0x00000023,
+ 0x033, 0x00000001,
+ 0x03F, 0x00000023,
+ 0x0EE, 0x00000000,
+ 0x0EF, 0x00004000,
+ 0x033, 0x00000000,
+ 0x03F, 0x0000000F,
+ 0x033, 0x00000002,
+ 0x03F, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002F81C,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001C86,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xA0000000, 0x00000000,
+ 0x0EF, 0x00020000,
+ 0x033, 0x00000000,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000008,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000010,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000018,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000020,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x033, 0x00000028,
+ 0x03E, 0x00001910,
+ 0x03F, 0x00020000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00001C02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000F02,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002B,
+ 0x03E, 0x00000F00,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002C,
+ 0x03E, 0x00000086,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002D,
+ 0x03E, 0x00000002,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002E,
+ 0x03E, 0x00000000,
+ 0x03F, 0x00020000,
+ 0x033, 0x0000002F,
+ 0x03E, 0x00000000,
+ 0x03F, 0x0002C010,
+ 0x0EF, 0x00000000,
+ 0xB0000000, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x01B, 0x00003A40,
+ 0x061, 0x0000D233,
+ 0x062, 0x0004D232,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x063, 0x00000002,
+ 0xA0000000, 0x00000000,
+ 0x063, 0x00000C02,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000200,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000237,
+ 0x030, 0x00001237,
+ 0x030, 0x00002237,
+ 0x030, 0x00003237,
+ 0x030, 0x00004207,
+ 0x030, 0x00005237,
+ 0x030, 0x00006237,
+ 0x030, 0x00007237,
+ 0x030, 0x00008207,
+ 0x030, 0x00009237,
+ 0x030, 0x0000A237,
+ 0x030, 0x0000B237,
+ 0x030, 0x0000C237,
+ 0x030, 0x0000D237,
+ 0x030, 0x0000E207,
+ 0x030, 0x0000F237,
+ 0x030, 0x00010237,
+ 0x030, 0x00011237,
+ 0x030, 0x00012207,
+ 0x030, 0x00013237,
+ 0x030, 0x00014237,
+ 0x030, 0x00015237,
+ 0x030, 0x00016207,
+ 0x030, 0x00017237,
+ 0x030, 0x00018207,
+ 0x030, 0x00019237,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000233,
+ 0x030, 0x00001233,
+ 0x030, 0x00002233,
+ 0x030, 0x00003233,
+ 0x030, 0x00004203,
+ 0x030, 0x00005233,
+ 0x030, 0x00006233,
+ 0x030, 0x00007233,
+ 0x030, 0x00008203,
+ 0x030, 0x00009233,
+ 0x030, 0x0000A233,
+ 0x030, 0x0000B233,
+ 0x030, 0x0000C233,
+ 0x030, 0x0000D233,
+ 0x030, 0x0000E203,
+ 0x030, 0x0000F233,
+ 0x030, 0x00010233,
+ 0x030, 0x00011233,
+ 0x030, 0x00012203,
+ 0x030, 0x00013233,
+ 0x030, 0x00014233,
+ 0x030, 0x00015233,
+ 0x030, 0x00016203,
+ 0x030, 0x00017233,
+ 0x030, 0x00018203,
+ 0x030, 0x00019233,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000080,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x030, 0x00000334,
+ 0x030, 0x00001334,
+ 0x030, 0x00002334,
+ 0x030, 0x00003334,
+ 0x030, 0x00004334,
+ 0x030, 0x00005334,
+ 0x030, 0x00006334,
+ 0x030, 0x00007334,
+ 0x030, 0x00008334,
+ 0x030, 0x00009334,
+ 0x030, 0x0000A334,
+ 0x030, 0x0000B334,
+ 0xA0000000, 0x00000000,
+ 0x030, 0x00000232,
+ 0x030, 0x00001232,
+ 0x030, 0x00002232,
+ 0x030, 0x00003232,
+ 0x030, 0x00004232,
+ 0x030, 0x00005232,
+ 0x030, 0x00006232,
+ 0x030, 0x00007232,
+ 0x030, 0x00008232,
+ 0x030, 0x00009232,
+ 0x030, 0x0000A232,
+ 0x030, 0x0000B232,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000040,
+ 0x030, 0x00000770,
+ 0x030, 0x00001770,
+ 0x030, 0x00002440,
+ 0x030, 0x00003440,
+ 0x030, 0x00004330,
+ 0x030, 0x00005330,
+ 0x030, 0x00008770,
+ 0x030, 0x0000A440,
+ 0x030, 0x0000C330,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x033, 0x00000200,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000201,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000202,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000203,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000204,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000205,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000206,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000207,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000208,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000209,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000020A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000280,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000281,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000282,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000283,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000284,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000285,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000286,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000287,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000288,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000289,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000028A,
+ 0x03F, 0x00000CF7,
+ 0x033, 0x00000300,
+ 0x03F, 0x0000006A,
+ 0x033, 0x00000301,
+ 0x03F, 0x0000006D,
+ 0x033, 0x00000302,
+ 0x03F, 0x0000046A,
+ 0x033, 0x00000303,
+ 0x03F, 0x0000086A,
+ 0x033, 0x00000304,
+ 0x03F, 0x00000C89,
+ 0x033, 0x00000305,
+ 0x03F, 0x00000CE8,
+ 0x033, 0x00000306,
+ 0x03F, 0x00000CEB,
+ 0x033, 0x00000307,
+ 0x03F, 0x00000CEE,
+ 0x033, 0x00000308,
+ 0x03F, 0x00000CF1,
+ 0x033, 0x00000309,
+ 0x03F, 0x00000CF4,
+ 0x033, 0x0000030A,
+ 0x03F, 0x00000CF7,
+ 0x0EE, 0x00000000,
+ 0x051, 0x0003C800,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x052, 0x000902CA,
+ 0xA0000000, 0x00000000,
+ 0x052, 0x000942C0,
+ 0xB0000000, 0x00000000,
+ 0x053, 0x000090F9,
+ 0x054, 0x00088000,
+ 0x057, 0x0004C80A,
+ 0x0EF, 0x00000020,
+ 0x033, 0x00000000,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000001,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000002,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000003,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000004,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000005,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000006,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000007,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000008,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x0000C246,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000009,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000000F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000010,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000011,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x00024246,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002C246,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000012,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000013,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000014,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000015,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000016,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000017,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000018,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000019,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001B,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001C,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001D,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001E,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000001F,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000021,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000022,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000023,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000024,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000025,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000026,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000027,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000028,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x00000029,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0002CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x033, 0x0000002A,
+ 0x03E, 0x00000020,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x03F, 0x0001CA46,
+ 0xA0000000, 0x00000000,
+ 0x03F, 0x00008E46,
+ 0xB0000000, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x0EE, 0x00010000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000060,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000061,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000062,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000063,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000064,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000065,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000066,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000067,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000068,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000069,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000006A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x81000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x91000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x92000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000001, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0x93000002, 0x00000000, 0x40000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000468,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000868,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000909,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D0A,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D4A,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000D8B,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xA0000000, 0x00000000,
+ 0x033, 0x00000020,
+ 0x03F, 0x00000487,
+ 0x033, 0x00000021,
+ 0x03F, 0x00000887,
+ 0x033, 0x00000022,
+ 0x03F, 0x00000947,
+ 0x033, 0x00000023,
+ 0x03F, 0x00000D48,
+ 0x033, 0x00000024,
+ 0x03F, 0x00000D88,
+ 0x033, 0x00000025,
+ 0x03F, 0x00000DE8,
+ 0x033, 0x00000026,
+ 0x03F, 0x00000DEB,
+ 0x033, 0x00000027,
+ 0x03F, 0x00000DEE,
+ 0x033, 0x00000028,
+ 0x03F, 0x00000DF1,
+ 0x033, 0x00000029,
+ 0x03F, 0x00000DF4,
+ 0x033, 0x0000002A,
+ 0x03F, 0x00000DF7,
+ 0xB0000000, 0x00000000,
+ 0x0EE, 0x00000000,
+ 0x05C, 0x000FCC00,
+ 0x067, 0x0000A505,
+ 0x0D3, 0x00000542,
+ 0x043, 0x00005000,
+ 0x059, 0x000A0000,
+ 0x05A, 0x00060000,
+ 0x05B, 0x00014000,
+ 0x001, 0x00040000,
+ 0x0EE, 0x00000002,
+ 0x033, 0x00000017,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000018,
+ 0x03F, 0x0000003F,
+ 0x033, 0x00000019,
+ 0x03F, 0x00000000,
+ 0x033, 0x0000001A,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001B,
+ 0x03F, 0x0000003F,
+ 0x033, 0x0000001C,
+ 0x03F, 0x0000003F,
+ 0x0EE, 0x00000000,
+ 0x092, 0x00084800,
+ 0x092, 0x00084801,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x0FE, 0x00000000,
+ 0x092, 0x00084800,
+ 0x08F, 0x0000182C,
+ 0x088, 0x0004326B,
+ 0x019, 0x00000005,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8822c_rf_b, B);
+
+static const u8 rtw8822c_txpwr_lmt_type0[] = {
+ 0, 0, 0, 0, 1, 72, 2, 0, 0, 0, 1, 60,
+ 0, 0, 0, 0, 2, 72, 2, 0, 0, 0, 2, 60,
+ 0, 0, 0, 0, 3, 76, 2, 0, 0, 0, 3, 60,
+ 0, 0, 0, 0, 4, 76, 2, 0, 0, 0, 4, 60,
+ 0, 0, 0, 0, 5, 76, 2, 0, 0, 0, 5, 60,
+ 0, 0, 0, 0, 6, 76, 2, 0, 0, 0, 6, 60,
+ 0, 0, 0, 0, 7, 76, 2, 0, 0, 0, 7, 60,
+ 0, 0, 0, 0, 8, 76, 2, 0, 0, 0, 8, 60,
+ 0, 0, 0, 0, 9, 76, 2, 0, 0, 0, 9, 60,
+ 0, 0, 0, 0, 10, 72, 2, 0, 0, 0, 10, 60,
+ 0, 0, 0, 0, 11, 72, 2, 0, 0, 0, 11, 60,
+ 0, 0, 0, 0, 12, 52, 2, 0, 0, 0, 12, 60,
+ 0, 0, 0, 0, 13, 48, 2, 0, 0, 0, 13, 60,
+ 0, 0, 0, 0, 14, 127, 2, 0, 0, 0, 14, 127,
+ 0, 0, 0, 1, 1, 52, 2, 0, 0, 1, 1, 60,
+ 0, 0, 0, 1, 2, 60, 2, 0, 0, 1, 2, 60,
+ 0, 0, 0, 1, 3, 64, 2, 0, 0, 1, 3, 60,
+ 0, 0, 0, 1, 4, 68, 2, 0, 0, 1, 4, 60,
+ 0, 0, 0, 1, 5, 76, 2, 0, 0, 1, 5, 60,
+ 0, 0, 0, 1, 6, 76, 2, 0, 0, 1, 6, 60,
+ 0, 0, 0, 1, 7, 76, 2, 0, 0, 1, 7, 60,
+ 0, 0, 0, 1, 8, 68, 2, 0, 0, 1, 8, 60,
+ 0, 0, 0, 1, 9, 64, 2, 0, 0, 1, 9, 60,
+ 0, 0, 0, 1, 10, 60, 2, 0, 0, 1, 10, 60,
+ 0, 0, 0, 1, 11, 52, 2, 0, 0, 1, 11, 60,
+ 0, 0, 0, 1, 12, 40, 2, 0, 0, 1, 12, 60,
+ 0, 0, 0, 1, 13, 28, 2, 0, 0, 1, 13, 60,
+ 0, 0, 0, 1, 14, 127, 2, 0, 0, 1, 14, 127,
+ 0, 0, 0, 2, 1, 52, 2, 0, 0, 2, 1, 60,
+ 0, 0, 0, 2, 2, 60, 2, 0, 0, 2, 2, 60,
+ 0, 0, 0, 2, 3, 64, 2, 0, 0, 2, 3, 60,
+ 0, 0, 0, 2, 4, 68, 2, 0, 0, 2, 4, 60,
+ 0, 0, 0, 2, 5, 76, 2, 0, 0, 2, 5, 60,
+ 0, 0, 0, 2, 6, 76, 2, 0, 0, 2, 6, 60,
+ 0, 0, 0, 2, 7, 76, 2, 0, 0, 2, 7, 60,
+ 0, 0, 0, 2, 8, 68, 2, 0, 0, 2, 8, 60,
+ 0, 0, 0, 2, 9, 64, 2, 0, 0, 2, 9, 60,
+ 0, 0, 0, 2, 10, 60, 2, 0, 0, 2, 10, 60,
+ 0, 0, 0, 2, 11, 52, 2, 0, 0, 2, 11, 60,
+ 0, 0, 0, 2, 12, 40, 2, 0, 0, 2, 12, 60,
+ 0, 0, 0, 2, 13, 28, 2, 0, 0, 2, 13, 60,
+ 0, 0, 0, 2, 14, 127, 2, 0, 0, 2, 14, 127,
+ 0, 0, 0, 3, 1, 52, 2, 0, 0, 3, 1, 36,
+ 0, 0, 0, 3, 2, 60, 2, 0, 0, 3, 2, 36,
+ 0, 0, 0, 3, 3, 64, 2, 0, 0, 3, 3, 36,
+ 0, 0, 0, 3, 4, 68, 2, 0, 0, 3, 4, 36,
+ 0, 0, 0, 3, 5, 76, 2, 0, 0, 3, 5, 36,
+ 0, 0, 0, 3, 6, 76, 2, 0, 0, 3, 6, 36,
+ 0, 0, 0, 3, 7, 76, 2, 0, 0, 3, 7, 36,
+ 0, 0, 0, 3, 8, 68, 2, 0, 0, 3, 8, 36,
+ 0, 0, 0, 3, 9, 64, 2, 0, 0, 3, 9, 36,
+ 0, 0, 0, 3, 10, 60, 2, 0, 0, 3, 10, 36,
+ 0, 0, 0, 3, 11, 52, 2, 0, 0, 3, 11, 36,
+ 0, 0, 0, 3, 12, 40, 2, 0, 0, 3, 12, 36,
+ 0, 0, 0, 3, 13, 28, 2, 0, 0, 3, 13, 36,
+ 0, 0, 0, 3, 14, 127, 2, 0, 0, 3, 14, 127,
+ 0, 0, 1, 2, 1, 127, 2, 0, 1, 2, 1, 127,
+ 0, 0, 1, 2, 2, 127, 2, 0, 1, 2, 2, 127,
+ 0, 0, 1, 2, 3, 52, 2, 0, 1, 2, 3, 60,
+ 0, 0, 1, 2, 4, 52, 2, 0, 1, 2, 4, 60,
+ 0, 0, 1, 2, 5, 60, 2, 0, 1, 2, 5, 60,
+ 0, 0, 1, 2, 6, 64, 2, 0, 1, 2, 6, 60,
+ 0, 0, 1, 2, 7, 60, 2, 0, 1, 2, 7, 60,
+ 0, 0, 1, 2, 8, 52, 2, 0, 1, 2, 8, 60,
+ 0, 0, 1, 2, 9, 52, 2, 0, 1, 2, 9, 60,
+ 0, 0, 1, 2, 10, 40, 2, 0, 1, 2, 10, 60,
+ 0, 0, 1, 2, 11, 28, 2, 0, 1, 2, 11, 60,
+ 0, 0, 1, 2, 12, 127, 2, 0, 1, 2, 12, 127,
+ 0, 0, 1, 2, 13, 127, 2, 0, 1, 2, 13, 127,
+ 0, 0, 1, 2, 14, 127, 2, 0, 1, 2, 14, 127,
+ 0, 0, 1, 3, 1, 127, 2, 0, 1, 3, 1, 127,
+ 0, 0, 1, 3, 2, 127, 2, 0, 1, 3, 2, 127,
+ 0, 0, 1, 3, 3, 48, 2, 0, 1, 3, 3, 36,
+ 0, 0, 1, 3, 4, 48, 2, 0, 1, 3, 4, 36,
+ 0, 0, 1, 3, 5, 60, 2, 0, 1, 3, 5, 36,
+ 0, 0, 1, 3, 6, 64, 2, 0, 1, 3, 6, 36,
+ 0, 0, 1, 3, 7, 60, 2, 0, 1, 3, 7, 36,
+ 0, 0, 1, 3, 8, 52, 2, 0, 1, 3, 8, 36,
+ 0, 0, 1, 3, 9, 52, 2, 0, 1, 3, 9, 36,
+ 0, 0, 1, 3, 10, 40, 2, 0, 1, 3, 10, 36,
+ 0, 0, 1, 3, 11, 26, 2, 0, 1, 3, 11, 36,
+ 0, 0, 1, 3, 12, 127, 2, 0, 1, 3, 12, 127,
+ 0, 0, 1, 3, 13, 127, 2, 0, 1, 3, 13, 127,
+ 0, 0, 1, 3, 14, 127, 2, 0, 1, 3, 14, 127,
+ 0, 1, 0, 1, 36, 74, 2, 1, 0, 1, 36, 62,
+ 0, 1, 0, 1, 40, 80, 2, 1, 0, 1, 40, 62,
+ 0, 1, 0, 1, 44, 80, 2, 1, 0, 1, 44, 62,
+ 0, 1, 0, 1, 48, 80, 2, 1, 0, 1, 48, 62,
+ 0, 1, 0, 1, 52, 80, 2, 1, 0, 1, 52, 62,
+ 0, 1, 0, 1, 56, 80, 2, 1, 0, 1, 56, 62,
+ 0, 1, 0, 1, 60, 80, 2, 1, 0, 1, 60, 62,
+ 0, 1, 0, 1, 64, 74, 2, 1, 0, 1, 64, 62,
+ 0, 1, 0, 1, 100, 72, 2, 1, 0, 1, 100, 62,
+ 0, 1, 0, 1, 104, 80, 2, 1, 0, 1, 104, 62,
+ 0, 1, 0, 1, 108, 80, 2, 1, 0, 1, 108, 62,
+ 0, 1, 0, 1, 112, 80, 2, 1, 0, 1, 112, 62,
+ 0, 1, 0, 1, 116, 80, 2, 1, 0, 1, 116, 62,
+ 0, 1, 0, 1, 120, 80, 2, 1, 0, 1, 120, 62,
+ 0, 1, 0, 1, 124, 80, 2, 1, 0, 1, 124, 62,
+ 0, 1, 0, 1, 128, 80, 2, 1, 0, 1, 128, 62,
+ 0, 1, 0, 1, 132, 80, 2, 1, 0, 1, 132, 62,
+ 0, 1, 0, 1, 136, 80, 2, 1, 0, 1, 136, 62,
+ 0, 1, 0, 1, 140, 72, 2, 1, 0, 1, 140, 62,
+ 0, 1, 0, 1, 144, 80, 2, 1, 0, 1, 144, 127,
+ 0, 1, 0, 1, 149, 80, 2, 1, 0, 1, 149, 127,
+ 0, 1, 0, 1, 153, 80, 2, 1, 0, 1, 153, 127,
+ 0, 1, 0, 1, 157, 80, 2, 1, 0, 1, 157, 127,
+ 0, 1, 0, 1, 161, 80, 2, 1, 0, 1, 161, 127,
+ 0, 1, 0, 1, 165, 80, 2, 1, 0, 1, 165, 127,
+ 0, 1, 0, 2, 36, 72, 2, 1, 0, 2, 36, 62,
+ 0, 1, 0, 2, 40, 80, 2, 1, 0, 2, 40, 62,
+ 0, 1, 0, 2, 44, 80, 2, 1, 0, 2, 44, 62,
+ 0, 1, 0, 2, 48, 80, 2, 1, 0, 2, 48, 62,
+ 0, 1, 0, 2, 52, 80, 2, 1, 0, 2, 52, 62,
+ 0, 1, 0, 2, 56, 80, 2, 1, 0, 2, 56, 62,
+ 0, 1, 0, 2, 60, 80, 2, 1, 0, 2, 60, 62,
+ 0, 1, 0, 2, 64, 74, 2, 1, 0, 2, 64, 62,
+ 0, 1, 0, 2, 100, 70, 2, 1, 0, 2, 100, 62,
+ 0, 1, 0, 2, 104, 80, 2, 1, 0, 2, 104, 62,
+ 0, 1, 0, 2, 108, 80, 2, 1, 0, 2, 108, 62,
+ 0, 1, 0, 2, 112, 80, 2, 1, 0, 2, 112, 62,
+ 0, 1, 0, 2, 116, 80, 2, 1, 0, 2, 116, 62,
+ 0, 1, 0, 2, 120, 80, 2, 1, 0, 2, 120, 62,
+ 0, 1, 0, 2, 124, 80, 2, 1, 0, 2, 124, 62,
+ 0, 1, 0, 2, 128, 80, 2, 1, 0, 2, 128, 62,
+ 0, 1, 0, 2, 132, 80, 2, 1, 0, 2, 132, 62,
+ 0, 1, 0, 2, 136, 80, 2, 1, 0, 2, 136, 62,
+ 0, 1, 0, 2, 140, 70, 2, 1, 0, 2, 140, 62,
+ 0, 1, 0, 2, 144, 80, 2, 1, 0, 2, 144, 127,
+ 0, 1, 0, 2, 149, 80, 2, 1, 0, 2, 149, 127,
+ 0, 1, 0, 2, 153, 80, 2, 1, 0, 2, 153, 127,
+ 0, 1, 0, 2, 157, 80, 2, 1, 0, 2, 157, 127,
+ 0, 1, 0, 2, 161, 80, 2, 1, 0, 2, 161, 127,
+ 0, 1, 0, 2, 165, 80, 2, 1, 0, 2, 165, 127,
+ 0, 1, 0, 3, 36, 68, 2, 1, 0, 3, 36, 38,
+ 0, 1, 0, 3, 40, 68, 2, 1, 0, 3, 40, 38,
+ 0, 1, 0, 3, 44, 68, 2, 1, 0, 3, 44, 38,
+ 0, 1, 0, 3, 48, 68, 2, 1, 0, 3, 48, 38,
+ 0, 1, 0, 3, 52, 68, 2, 1, 0, 3, 52, 38,
+ 0, 1, 0, 3, 56, 68, 2, 1, 0, 3, 56, 38,
+ 0, 1, 0, 3, 60, 66, 2, 1, 0, 3, 60, 38,
+ 0, 1, 0, 3, 64, 68, 2, 1, 0, 3, 64, 38,
+ 0, 1, 0, 3, 100, 60, 2, 1, 0, 3, 100, 38,
+ 0, 1, 0, 3, 104, 68, 2, 1, 0, 3, 104, 38,
+ 0, 1, 0, 3, 108, 68, 2, 1, 0, 3, 108, 38,
+ 0, 1, 0, 3, 112, 68, 2, 1, 0, 3, 112, 38,
+ 0, 1, 0, 3, 116, 68, 2, 1, 0, 3, 116, 38,
+ 0, 1, 0, 3, 120, 68, 2, 1, 0, 3, 120, 38,
+ 0, 1, 0, 3, 124, 68, 2, 1, 0, 3, 124, 38,
+ 0, 1, 0, 3, 128, 68, 2, 1, 0, 3, 128, 38,
+ 0, 1, 0, 3, 132, 68, 2, 1, 0, 3, 132, 38,
+ 0, 1, 0, 3, 136, 68, 2, 1, 0, 3, 136, 38,
+ 0, 1, 0, 3, 140, 60, 2, 1, 0, 3, 140, 38,
+ 0, 1, 0, 3, 144, 68, 2, 1, 0, 3, 144, 127,
+ 0, 1, 0, 3, 149, 80, 2, 1, 0, 3, 149, 127,
+ 0, 1, 0, 3, 153, 80, 2, 1, 0, 3, 153, 127,
+ 0, 1, 0, 3, 157, 80, 2, 1, 0, 3, 157, 127,
+ 0, 1, 0, 3, 161, 80, 2, 1, 0, 3, 161, 127,
+ 0, 1, 0, 3, 165, 80, 2, 1, 0, 3, 165, 127,
+ 0, 1, 1, 2, 38, 66, 2, 1, 1, 2, 38, 64,
+ 0, 1, 1, 2, 46, 72, 2, 1, 1, 2, 46, 64,
+ 0, 1, 1, 2, 54, 72, 2, 1, 1, 2, 54, 64,
+ 0, 1, 1, 2, 62, 64, 2, 1, 1, 2, 62, 64,
+ 0, 1, 1, 2, 102, 58, 2, 1, 1, 2, 102, 64,
+ 0, 1, 1, 2, 110, 74, 2, 1, 1, 2, 110, 64,
+ 0, 1, 1, 2, 118, 74, 2, 1, 1, 2, 118, 64,
+ 0, 1, 1, 2, 126, 74, 2, 1, 1, 2, 126, 64,
+ 0, 1, 1, 2, 134, 74, 2, 1, 1, 2, 134, 64,
+ 0, 1, 1, 2, 142, 74, 2, 1, 1, 2, 142, 127,
+ 0, 1, 1, 2, 151, 74, 2, 1, 1, 2, 151, 127,
+ 0, 1, 1, 2, 159, 74, 2, 1, 1, 2, 159, 127,
+ 0, 1, 1, 3, 38, 60, 2, 1, 1, 3, 38, 40,
+ 0, 1, 1, 3, 46, 68, 2, 1, 1, 3, 46, 40,
+ 0, 1, 1, 3, 54, 68, 2, 1, 1, 3, 54, 40,
+ 0, 1, 1, 3, 62, 58, 2, 1, 1, 3, 62, 40,
+ 0, 1, 1, 3, 102, 54, 2, 1, 1, 3, 102, 40,
+ 0, 1, 1, 3, 110, 68, 2, 1, 1, 3, 110, 40,
+ 0, 1, 1, 3, 118, 68, 2, 1, 1, 3, 118, 40,
+ 0, 1, 1, 3, 126, 68, 2, 1, 1, 3, 126, 40,
+ 0, 1, 1, 3, 134, 68, 2, 1, 1, 3, 134, 40,
+ 0, 1, 1, 3, 142, 68, 2, 1, 1, 3, 142, 127,
+ 0, 1, 1, 3, 151, 74, 2, 1, 1, 3, 151, 127,
+ 0, 1, 1, 3, 159, 74, 2, 1, 1, 3, 159, 127,
+ 0, 1, 2, 4, 42, 64, 2, 1, 2, 4, 42, 64,
+ 0, 1, 2, 4, 58, 62, 2, 1, 2, 4, 58, 64,
+ 0, 1, 2, 4, 106, 58, 2, 1, 2, 4, 106, 64,
+ 0, 1, 2, 4, 122, 72, 2, 1, 2, 4, 122, 64,
+ 0, 1, 2, 4, 138, 72, 2, 1, 2, 4, 138, 127,
+ 0, 1, 2, 4, 155, 72, 2, 1, 2, 4, 155, 127,
+ 0, 1, 2, 5, 42, 54, 2, 1, 2, 5, 42, 40,
+ 0, 1, 2, 5, 58, 52, 2, 1, 2, 5, 58, 40,
+ 0, 1, 2, 5, 106, 50, 2, 1, 2, 5, 106, 40,
+ 0, 1, 2, 5, 122, 66, 2, 1, 2, 5, 122, 40,
+ 0, 1, 2, 5, 138, 66, 2, 1, 2, 5, 138, 127,
+ 0, 1, 2, 5, 155, 62, 2, 1, 2, 5, 155, 127
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8822c_txpwr_lmt_type0);
+
+static const u32 rtw8822c_array_mp_cal_init[] = {
+ 0x1b00, 0x00000008,
+ 0x1b00, 0x00A70008,
+ 0x1b00, 0x00150008,
+ 0x1b00, 0x00000008,
+ 0x1b04, 0xE2462952,
+ 0x1b08, 0x00000080,
+ 0x1b0c, 0x00000000,
+ 0x1b10, 0x00010C00,
+ 0x1b14, 0x00000000,
+ 0x1b18, 0x00292903,
+ 0x1b1c, 0xA218FC32,
+ 0x1b20, 0x01040008,
+ 0x1b24, 0x00060008,
+ 0x1b28, 0x00060300,
+ 0x1b2C, 0x00180018,
+ 0x1b30, 0x40000000,
+ 0x1b34, 0x00000800,
+ 0x1b38, 0x40000000,
+ 0x1b3C, 0x40000000,
+ 0x1b98, 0x00000000,
+ 0x1b9c, 0x00000000,
+ 0x1bc0, 0x01000000,
+ 0x1bcc, 0x00000000,
+ 0x1be4, 0x00000000,
+ 0x1bec, 0x40000000,
+ 0x1b40, 0x40000000,
+ 0x1b44, 0x20004064,
+ 0x1b48, 0x0005002D,
+ 0x1b4c, 0x00000000,
+ 0x1b60, 0x1F100000,
+ 0x1b64, 0x12000000,
+ 0x1b4c, 0x00000000,
+ 0x1b4c, 0x008a0000,
+ 0x1b50, 0x000003BE,
+ 0x1b4c, 0x018a0000,
+ 0x1b50, 0x0000057A,
+ 0x1b4c, 0x028a0000,
+ 0x1b50, 0x000006C8,
+ 0x1b4c, 0x038a0000,
+ 0x1b50, 0x000007E0,
+ 0x1b4c, 0x048a0000,
+ 0x1b50, 0x000008D5,
+ 0x1b4c, 0x058a0000,
+ 0x1b50, 0x000009B2,
+ 0x1b4c, 0x068a0000,
+ 0x1b50, 0x00000A7D,
+ 0x1b4c, 0x078a0000,
+ 0x1b50, 0x00000B3A,
+ 0x1b4c, 0x088a0000,
+ 0x1b50, 0x00000BEB,
+ 0x1b4c, 0x098a0000,
+ 0x1b50, 0x00000C92,
+ 0x1b4c, 0x0A8a0000,
+ 0x1b50, 0x00000D31,
+ 0x1b4c, 0x0B8a0000,
+ 0x1b50, 0x00000DC9,
+ 0x1b4c, 0x0C8a0000,
+ 0x1b50, 0x00000E5A,
+ 0x1b4c, 0x0D8a0000,
+ 0x1b50, 0x00000EE6,
+ 0x1b4c, 0x0E8a0000,
+ 0x1b50, 0x00000F6D,
+ 0x1b4c, 0x0F8a0000,
+ 0x1b50, 0x00000FF0,
+ 0x1b4c, 0x108a0000,
+ 0x1b50, 0x0000106F,
+ 0x1b4c, 0x118a0000,
+ 0x1b50, 0x000010E9,
+ 0x1b4c, 0x128a0000,
+ 0x1b50, 0x00001161,
+ 0x1b4c, 0x138a0000,
+ 0x1b50, 0x000011D5,
+ 0x1b4c, 0x148a0000,
+ 0x1b50, 0x00001247,
+ 0x1b4c, 0x158a0000,
+ 0x1b50, 0x000012B5,
+ 0x1b4c, 0x168a0000,
+ 0x1b50, 0x00001322,
+ 0x1b4c, 0x178a0000,
+ 0x1b50, 0x0000138B,
+ 0x1b4c, 0x188a0000,
+ 0x1b50, 0x000013F3,
+ 0x1b4c, 0x198a0000,
+ 0x1b50, 0x00001459,
+ 0x1b4c, 0x1A8a0000,
+ 0x1b50, 0x000014BD,
+ 0x1b4c, 0x1B8a0000,
+ 0x1b50, 0x0000151E,
+ 0x1b4c, 0x1C8a0000,
+ 0x1b50, 0x0000157F,
+ 0x1b4c, 0x1D8a0000,
+ 0x1b50, 0x000015DD,
+ 0x1b4c, 0x1E8a0000,
+ 0x1b50, 0x0000163A,
+ 0x1b4c, 0x1F8a0000,
+ 0x1b50, 0x00001695,
+ 0x1b4c, 0x208a0000,
+ 0x1b50, 0x000016EF,
+ 0x1b4c, 0x218a0000,
+ 0x1b50, 0x00001748,
+ 0x1b4c, 0x228a0000,
+ 0x1b50, 0x0000179F,
+ 0x1b4c, 0x238a0000,
+ 0x1b50, 0x000017F5,
+ 0x1b4c, 0x248a0000,
+ 0x1b50, 0x0000184A,
+ 0x1b4c, 0x258a0000,
+ 0x1b50, 0x0000189E,
+ 0x1b4c, 0x268a0000,
+ 0x1b50, 0x000018F1,
+ 0x1b4c, 0x278a0000,
+ 0x1b50, 0x00001942,
+ 0x1b4c, 0x288a0000,
+ 0x1b50, 0x00001993,
+ 0x1b4c, 0x298a0000,
+ 0x1b50, 0x000019E2,
+ 0x1b4c, 0x2A8a0000,
+ 0x1b50, 0x00001A31,
+ 0x1b4c, 0x2B8a0000,
+ 0x1b50, 0x00001A7F,
+ 0x1b4c, 0x2C8a0000,
+ 0x1b50, 0x00001ACC,
+ 0x1b4c, 0x2D8a0000,
+ 0x1b50, 0x00001B18,
+ 0x1b4c, 0x2E8a0000,
+ 0x1b50, 0x00001B63,
+ 0x1b4c, 0x2F8a0000,
+ 0x1b50, 0x00001BAD,
+ 0x1b4c, 0x308a0000,
+ 0x1b50, 0x00001BF7,
+ 0x1b4c, 0x318a0000,
+ 0x1b50, 0x00001C40,
+ 0x1b4c, 0x328a0000,
+ 0x1b50, 0x00001C88,
+ 0x1b4c, 0x338a0000,
+ 0x1b50, 0x00001CCF,
+ 0x1b4c, 0x348a0000,
+ 0x1b50, 0x00001D16,
+ 0x1b4c, 0x358a0000,
+ 0x1b50, 0x00001D5C,
+ 0x1b4c, 0x368a0000,
+ 0x1b50, 0x00001DA2,
+ 0x1b4c, 0x378a0000,
+ 0x1b50, 0x00001DE6,
+ 0x1b4c, 0x388a0000,
+ 0x1b50, 0x00001E2B,
+ 0x1b4c, 0x398a0000,
+ 0x1b50, 0x00001E6E,
+ 0x1b4c, 0x3A8a0000,
+ 0x1b50, 0x00001EB1,
+ 0x1b4c, 0x3B8a0000,
+ 0x1b50, 0x00001EF4,
+ 0x1b4c, 0x3C8a0000,
+ 0x1b50, 0x00001F35,
+ 0x1b4c, 0x3D8a0000,
+ 0x1b50, 0x00001F77,
+ 0x1b4c, 0x3E8a0000,
+ 0x1b50, 0x00001FB8,
+ 0x1b4c, 0x3F8a0000,
+ 0x1b50, 0x00001FF8,
+ 0x1b4c, 0x00000000,
+ 0x1b50, 0x00000000,
+ 0x1b58, 0x00890000,
+ 0x1b5C, 0x3C6B3FFF,
+ 0x1b58, 0x02890000,
+ 0x1b5C, 0x35D9390A,
+ 0x1b58, 0x04890000,
+ 0x1b5C, 0x2FFE32D6,
+ 0x1b58, 0x06890000,
+ 0x1b5C, 0x2AC62D4F,
+ 0x1b58, 0x08890000,
+ 0x1b5C, 0x261F2862,
+ 0x1b58, 0x0A890000,
+ 0x1b5C, 0x21FA23FD,
+ 0x1b58, 0x0C890000,
+ 0x1b5C, 0x1E482013,
+ 0x1b58, 0x0E890000,
+ 0x1b5C, 0x1AFD1C96,
+ 0x1b58, 0x10890000,
+ 0x1b5C, 0x180E197B,
+ 0x1b58, 0x12890000,
+ 0x1b5C, 0x157016B5,
+ 0x1b58, 0x14890000,
+ 0x1b5C, 0x131B143D,
+ 0x1b58, 0x16890000,
+ 0x1b5C, 0x1107120A,
+ 0x1b58, 0x18890000,
+ 0x1b5C, 0x0F2D1013,
+ 0x1b58, 0x1A890000,
+ 0x1b5C, 0x0D870E54,
+ 0x1b58, 0x1C890000,
+ 0x1b5C, 0x0C0E0CC5,
+ 0x1b58, 0x1E890000,
+ 0x1b5C, 0x0ABF0B62,
+ 0x1b58, 0x20890000,
+ 0x1b5C, 0x09930A25,
+ 0x1b58, 0x22890000,
+ 0x1b5C, 0x0889090A,
+ 0x1b58, 0x24890000,
+ 0x1b5C, 0x079B080F,
+ 0x1b58, 0x26890000,
+ 0x1b5C, 0x06C7072E,
+ 0x1b58, 0x28890000,
+ 0x1b5C, 0x060B0666,
+ 0x1b58, 0x2A890000,
+ 0x1b5C, 0x056305B4,
+ 0x1b58, 0x2C890000,
+ 0x1b5C, 0x04CD0515,
+ 0x1b58, 0x2E890000,
+ 0x1b5C, 0x04470488,
+ 0x1b58, 0x30890000,
+ 0x1b5C, 0x03D0040A,
+ 0x1b58, 0x32890000,
+ 0x1b5C, 0x03660399,
+ 0x1b58, 0x34890000,
+ 0x1b5C, 0x03070335,
+ 0x1b58, 0x36890000,
+ 0x1b5C, 0x02B302DC,
+ 0x1b58, 0x38890000,
+ 0x1b5C, 0x0268028C,
+ 0x1b58, 0x3A890000,
+ 0x1b5C, 0x02250245,
+ 0x1b58, 0x3C890000,
+ 0x1b5C, 0x01E90206,
+ 0x1b58, 0x3E890000,
+ 0x1b5C, 0x01B401CE,
+ 0x1b58, 0x40890000,
+ 0x1b5C, 0x0185019C,
+ 0x1b58, 0x42890000,
+ 0x1b5C, 0x015A016F,
+ 0x1b58, 0x44890000,
+ 0x1b5C, 0x01350147,
+ 0x1b58, 0x46890000,
+ 0x1b5C, 0x01130123,
+ 0x1b58, 0x48890000,
+ 0x1b5C, 0x00F50104,
+ 0x1b58, 0x4A890000,
+ 0x1b5C, 0x00DA00E7,
+ 0x1b58, 0x4C890000,
+ 0x1b5C, 0x00C300CE,
+ 0x1b58, 0x4E890000,
+ 0x1b5C, 0x00AE00B8,
+ 0x1b58, 0x50890000,
+ 0x1b5C, 0x009B00A4,
+ 0x1b58, 0x52890000,
+ 0x1b5C, 0x008A0092,
+ 0x1b58, 0x54890000,
+ 0x1b5C, 0x007B0082,
+ 0x1b58, 0x56890000,
+ 0x1b5C, 0x006E0074,
+ 0x1b58, 0x58890000,
+ 0x1b5C, 0x00620067,
+ 0x1b58, 0x5A890000,
+ 0x1b5C, 0x0057005C,
+ 0x1b58, 0x5C890000,
+ 0x1b5C, 0x004E0052,
+ 0x1b58, 0x5E890000,
+ 0x1b5C, 0x00450049,
+ 0x1b58, 0x60890000,
+ 0x1b5C, 0x003E0041,
+ 0x1b58, 0x62890000,
+ 0x1b5C, 0x0037003A,
+ 0x1b58, 0x62010000,
+ 0x1b00, 0x0000000A,
+ 0x1b00, 0x00A7000A,
+ 0x1b00, 0x0015000A,
+ 0x1b00, 0x0000000A,
+ 0x1b04, 0xE2462952,
+ 0x1b08, 0x00000080,
+ 0x1b0c, 0x00000000,
+ 0x1b10, 0x00010C00,
+ 0x1b14, 0x00000000,
+ 0x1b18, 0x00292903,
+ 0x1b1c, 0xA218FC32,
+ 0x1b20, 0x01040008,
+ 0x1b24, 0x00060008,
+ 0x1b28, 0x00060300,
+ 0x1b2C, 0x00180018,
+ 0x1b30, 0x40000000,
+ 0x1b34, 0x00000800,
+ 0x1b38, 0x40000000,
+ 0x1b3C, 0x40000000,
+ 0x1b98, 0x00000000,
+ 0x1b9c, 0x00000000,
+ 0x1bc0, 0x01000000,
+ 0x1bcc, 0x00000000,
+ 0x1be4, 0x00000000,
+ 0x1bec, 0x40000000,
+ 0x1b60, 0x1F100000,
+ 0x1b64, 0x12000000,
+ 0x1b58, 0x00890000,
+ 0x1b5C, 0x3C6B3FFF,
+ 0x1b58, 0x02890000,
+ 0x1b5C, 0x35D9390A,
+ 0x1b58, 0x04890000,
+ 0x1b5C, 0x2FFE32D6,
+ 0x1b58, 0x06890000,
+ 0x1b5C, 0x2AC62D4F,
+ 0x1b58, 0x08890000,
+ 0x1b5C, 0x261F2862,
+ 0x1b58, 0x0A890000,
+ 0x1b5C, 0x21FA23FD,
+ 0x1b58, 0x0C890000,
+ 0x1b5C, 0x1E482013,
+ 0x1b58, 0x0E890000,
+ 0x1b5C, 0x1AFD1C96,
+ 0x1b58, 0x10890000,
+ 0x1b5C, 0x180E197B,
+ 0x1b58, 0x12890000,
+ 0x1b5C, 0x157016B5,
+ 0x1b58, 0x14890000,
+ 0x1b5C, 0x131B143D,
+ 0x1b58, 0x16890000,
+ 0x1b5C, 0x1107120A,
+ 0x1b58, 0x18890000,
+ 0x1b5C, 0x0F2D1013,
+ 0x1b58, 0x1A890000,
+ 0x1b5C, 0x0D870E54,
+ 0x1b58, 0x1C890000,
+ 0x1b5C, 0x0C0E0CC5,
+ 0x1b58, 0x1E890000,
+ 0x1b5C, 0x0ABF0B62,
+ 0x1b58, 0x20890000,
+ 0x1b5C, 0x09930A25,
+ 0x1b58, 0x22890000,
+ 0x1b5C, 0x0889090A,
+ 0x1b58, 0x24890000,
+ 0x1b5C, 0x079B080F,
+ 0x1b58, 0x26890000,
+ 0x1b5C, 0x06C7072E,
+ 0x1b58, 0x28890000,
+ 0x1b5C, 0x060B0666,
+ 0x1b58, 0x2A890000,
+ 0x1b5C, 0x056305B4,
+ 0x1b58, 0x2C890000,
+ 0x1b5C, 0x04CD0515,
+ 0x1b58, 0x2E890000,
+ 0x1b5C, 0x04470488,
+ 0x1b58, 0x30890000,
+ 0x1b5C, 0x03D0040A,
+ 0x1b58, 0x32890000,
+ 0x1b5C, 0x03660399,
+ 0x1b58, 0x34890000,
+ 0x1b5C, 0x03070335,
+ 0x1b58, 0x36890000,
+ 0x1b5C, 0x02B302DC,
+ 0x1b58, 0x38890000,
+ 0x1b5C, 0x0268028C,
+ 0x1b58, 0x3A890000,
+ 0x1b5C, 0x02250245,
+ 0x1b58, 0x3C890000,
+ 0x1b5C, 0x01E90206,
+ 0x1b58, 0x3E890000,
+ 0x1b5C, 0x01B401CE,
+ 0x1b58, 0x40890000,
+ 0x1b5C, 0x0185019C,
+ 0x1b58, 0x42890000,
+ 0x1b5C, 0x015A016F,
+ 0x1b58, 0x44890000,
+ 0x1b5C, 0x01350147,
+ 0x1b58, 0x46890000,
+ 0x1b5C, 0x01130123,
+ 0x1b58, 0x48890000,
+ 0x1b5C, 0x00F50104,
+ 0x1b58, 0x4A890000,
+ 0x1b5C, 0x00DA00E7,
+ 0x1b58, 0x4C890000,
+ 0x1b5C, 0x00C300CE,
+ 0x1b58, 0x4E890000,
+ 0x1b5C, 0x00AE00B8,
+ 0x1b58, 0x50890000,
+ 0x1b5C, 0x009B00A4,
+ 0x1b58, 0x52890000,
+ 0x1b5C, 0x008A0092,
+ 0x1b58, 0x54890000,
+ 0x1b5C, 0x007B0082,
+ 0x1b58, 0x56890000,
+ 0x1b5C, 0x006E0074,
+ 0x1b58, 0x58890000,
+ 0x1b5C, 0x00620067,
+ 0x1b58, 0x5A890000,
+ 0x1b5C, 0x0057005C,
+ 0x1b58, 0x5C890000,
+ 0x1b5C, 0x004E0052,
+ 0x1b58, 0x5E890000,
+ 0x1b5C, 0x00450049,
+ 0x1b58, 0x60890000,
+ 0x1b5C, 0x003E0041,
+ 0x1b58, 0x62890000,
+ 0x1b5C, 0x0037003A,
+ 0x1b58, 0x62010000,
+ 0x1b00, 0x0000000C,
+ 0x1bd4, 0x000000F0,
+ 0x1bb8, 0x20202020,
+ 0x1bbc, 0x20202020,
+ 0x1bc0, 0x20202020,
+ 0x1bc4, 0x20202020,
+ 0x1bc8, 0x04040404,
+ 0x1bcc, 0x04040404,
+ 0x1bd0, 0x04040404,
+ 0x1bd8, 0x04040404,
+ 0x1bdc, 0x20202020,
+ 0x1be0, 0x04040404,
+ 0x1be4, 0x77472F17,
+ 0x1be8, 0xEFBFA78F,
+ 0x1bec, 0x00000000,
+ 0x1bf0, 0x1F1F1939,
+ 0x1b04, 0x0000005B,
+ 0x1b08, 0xB000C000,
+ 0x1b5c, 0x0000005B,
+ 0x1b60, 0xB000C000,
+ 0x1bb4, 0x20000000,
+ 0x1b00, 0x00000008,
+ 0x1b80, 0x00000007,
+ 0x1b80, 0x00080005,
+ 0x1b80, 0x00080007,
+ 0x1b80, 0x80000015,
+ 0x1b80, 0x80000017,
+ 0x1b80, 0x09080025,
+ 0x1b80, 0x09080027,
+ 0x1b80, 0x0f020035,
+ 0x1b80, 0x0f020037,
+ 0x1b80, 0x00220045,
+ 0x1b80, 0x00220047,
+ 0x1b80, 0x00040055,
+ 0x1b80, 0x00040057,
+ 0x1b80, 0x05c00065,
+ 0x1b80, 0x05c00067,
+ 0x1b80, 0x00070075,
+ 0x1b80, 0x00070077,
+ 0x1b80, 0x64020085,
+ 0x1b80, 0x64020087,
+ 0x1b80, 0x00020095,
+ 0x1b80, 0x00020097,
+ 0x1b80, 0x000400a5,
+ 0x1b80, 0x000400a7,
+ 0x1b80, 0x4a0000b5,
+ 0x1b80, 0x4a0000b7,
+ 0x1b80, 0x4b0400c5,
+ 0x1b80, 0x4b0400c7,
+ 0x1b80, 0x860300d5,
+ 0x1b80, 0x860300d7,
+ 0x1b80, 0x400900e5,
+ 0x1b80, 0x400900e7,
+ 0x1b80, 0xe02700f5,
+ 0x1b80, 0xe02700f7,
+ 0x1b80, 0x4b050105,
+ 0x1b80, 0x4b050107,
+ 0x1b80, 0x87030115,
+ 0x1b80, 0x87030117,
+ 0x1b80, 0x400b0125,
+ 0x1b80, 0x400b0127,
+ 0x1b80, 0xe0270135,
+ 0x1b80, 0xe0270137,
+ 0x1b80, 0x4b060145,
+ 0x1b80, 0x4b060147,
+ 0x1b80, 0x88030155,
+ 0x1b80, 0x88030157,
+ 0x1b80, 0x400d0165,
+ 0x1b80, 0x400d0167,
+ 0x1b80, 0xe0270175,
+ 0x1b80, 0xe0270177,
+ 0x1b80, 0x4b000185,
+ 0x1b80, 0x4b000187,
+ 0x1b80, 0x00070195,
+ 0x1b80, 0x00070197,
+ 0x1b80, 0x4c0001a5,
+ 0x1b80, 0x4c0001a7,
+ 0x1b80, 0x000401b5,
+ 0x1b80, 0x000401b7,
+ 0x1b80, 0x400801c5,
+ 0x1b80, 0x400801c7,
+ 0x1b80, 0x505501d5,
+ 0x1b80, 0x505501d7,
+ 0x1b80, 0x090a01e5,
+ 0x1b80, 0x090a01e7,
+ 0x1b80, 0x0ffe01f5,
+ 0x1b80, 0x0ffe01f7,
+ 0x1b80, 0x00220205,
+ 0x1b80, 0x00220207,
+ 0x1b80, 0x00040215,
+ 0x1b80, 0x00040217,
+ 0x1b80, 0x05c00225,
+ 0x1b80, 0x05c00227,
+ 0x1b80, 0x00070235,
+ 0x1b80, 0x00070237,
+ 0x1b80, 0x64000245,
+ 0x1b80, 0x64000247,
+ 0x1b80, 0x00020255,
+ 0x1b80, 0x00020257,
+ 0x1b80, 0x30000265,
+ 0x1b80, 0x30000267,
+ 0x1b80, 0xa5100275,
+ 0x1b80, 0xa5100277,
+ 0x1b80, 0xe3520285,
+ 0x1b80, 0xe3520287,
+ 0x1b80, 0xf01d0295,
+ 0x1b80, 0xf01d0297,
+ 0x1b80, 0xf11d02a5,
+ 0x1b80, 0xf11d02a7,
+ 0x1b80, 0xf21d02b5,
+ 0x1b80, 0xf21d02b7,
+ 0x1b80, 0xf31d02c5,
+ 0x1b80, 0xf31d02c7,
+ 0x1b80, 0xf41d02d5,
+ 0x1b80, 0xf41d02d7,
+ 0x1b80, 0xf51d02e5,
+ 0x1b80, 0xf51d02e7,
+ 0x1b80, 0xf61d02f5,
+ 0x1b80, 0xf61d02f7,
+ 0x1b80, 0xf71d0305,
+ 0x1b80, 0xf71d0307,
+ 0x1b80, 0xf81d0315,
+ 0x1b80, 0xf81d0317,
+ 0x1b80, 0xf91d0325,
+ 0x1b80, 0xf91d0327,
+ 0x1b80, 0xfa1d0335,
+ 0x1b80, 0xfa1d0337,
+ 0x1b80, 0xfb1d0345,
+ 0x1b80, 0xfb1d0347,
+ 0x1b80, 0xfc1d0355,
+ 0x1b80, 0xfc1d0357,
+ 0x1b80, 0xfd1d0365,
+ 0x1b80, 0xfd1d0367,
+ 0x1b80, 0xf21d0375,
+ 0x1b80, 0xf21d0377,
+ 0x1b80, 0xf31d0385,
+ 0x1b80, 0xf31d0387,
+ 0x1b80, 0xf41d0395,
+ 0x1b80, 0xf41d0397,
+ 0x1b80, 0xf51d03a5,
+ 0x1b80, 0xf51d03a7,
+ 0x1b80, 0xf61d03b5,
+ 0x1b80, 0xf61d03b7,
+ 0x1b80, 0xf71d03c5,
+ 0x1b80, 0xf71d03c7,
+ 0x1b80, 0xf81d03d5,
+ 0x1b80, 0xf81d03d7,
+ 0x1b80, 0xf91d03e5,
+ 0x1b80, 0xf91d03e7,
+ 0x1b80, 0xfa1d03f5,
+ 0x1b80, 0xfa1d03f7,
+ 0x1b80, 0xfb1d0405,
+ 0x1b80, 0xfb1d0407,
+ 0x1b80, 0xfc1d0415,
+ 0x1b80, 0xfc1d0417,
+ 0x1b80, 0xfd1d0425,
+ 0x1b80, 0xfd1d0427,
+ 0x1b80, 0xfe1d0435,
+ 0x1b80, 0xfe1d0437,
+ 0x1b80, 0xff1d0445,
+ 0x1b80, 0xff1d0447,
+ 0x1b80, 0x00010455,
+ 0x1b80, 0x00010457,
+ 0x1b80, 0x30620465,
+ 0x1b80, 0x30620467,
+ 0x1b80, 0x307a0475,
+ 0x1b80, 0x307a0477,
+ 0x1b80, 0x307c0485,
+ 0x1b80, 0x307c0487,
+ 0x1b80, 0x30eb0495,
+ 0x1b80, 0x30eb0497,
+ 0x1b80, 0x308004a5,
+ 0x1b80, 0x308004a7,
+ 0x1b80, 0x308c04b5,
+ 0x1b80, 0x308c04b7,
+ 0x1b80, 0x309804c5,
+ 0x1b80, 0x309804c7,
+ 0x1b80, 0x307f04d5,
+ 0x1b80, 0x307f04d7,
+ 0x1b80, 0x308b04e5,
+ 0x1b80, 0x308b04e7,
+ 0x1b80, 0x309704f5,
+ 0x1b80, 0x309704f7,
+ 0x1b80, 0x30ef0505,
+ 0x1b80, 0x30ef0507,
+ 0x1b80, 0x30fa0515,
+ 0x1b80, 0x30fa0517,
+ 0x1b80, 0x31050525,
+ 0x1b80, 0x31050527,
+ 0x1b80, 0x316a0535,
+ 0x1b80, 0x316a0537,
+ 0x1b80, 0x307a0545,
+ 0x1b80, 0x307a0547,
+ 0x1b80, 0x30e90555,
+ 0x1b80, 0x30e90557,
+ 0x1b80, 0x31870565,
+ 0x1b80, 0x31870567,
+ 0x1b80, 0x31a00575,
+ 0x1b80, 0x31a00577,
+ 0x1b80, 0x31ba0585,
+ 0x1b80, 0x31ba0587,
+ 0x1b80, 0x31c20595,
+ 0x1b80, 0x31c20597,
+ 0x1b80, 0x31ca05a5,
+ 0x1b80, 0x31ca05a7,
+ 0x1b80, 0x31d205b5,
+ 0x1b80, 0x31d205b7,
+ 0x1b80, 0x31da05c5,
+ 0x1b80, 0x31da05c7,
+ 0x1b80, 0x31e905d5,
+ 0x1b80, 0x31e905d7,
+ 0x1b80, 0x31f805e5,
+ 0x1b80, 0x31f805e7,
+ 0x1b80, 0x31fe05f5,
+ 0x1b80, 0x31fe05f7,
+ 0x1b80, 0x32040605,
+ 0x1b80, 0x32040607,
+ 0x1b80, 0x320a0615,
+ 0x1b80, 0x320a0617,
+ 0x1b80, 0xe2eb0625,
+ 0x1b80, 0xe2eb0627,
+ 0x1b80, 0x4d040635,
+ 0x1b80, 0x4d040637,
+ 0x1b80, 0x20800645,
+ 0x1b80, 0x20800647,
+ 0x1b80, 0x00000655,
+ 0x1b80, 0x00000657,
+ 0x1b80, 0x4d000665,
+ 0x1b80, 0x4d000667,
+ 0x1b80, 0x55070675,
+ 0x1b80, 0x55070677,
+ 0x1b80, 0xe2e30685,
+ 0x1b80, 0xe2e30687,
+ 0x1b80, 0xe2e30695,
+ 0x1b80, 0xe2e30697,
+ 0x1b80, 0x4d0406a5,
+ 0x1b80, 0x4d0406a7,
+ 0x1b80, 0x208806b5,
+ 0x1b80, 0x208806b7,
+ 0x1b80, 0x020006c5,
+ 0x1b80, 0x020006c7,
+ 0x1b80, 0x4d0006d5,
+ 0x1b80, 0x4d0006d7,
+ 0x1b80, 0x550f06e5,
+ 0x1b80, 0x550f06e7,
+ 0x1b80, 0xe2e306f5,
+ 0x1b80, 0xe2e306f7,
+ 0x1b80, 0x4f020705,
+ 0x1b80, 0x4f020707,
+ 0x1b80, 0x4e000715,
+ 0x1b80, 0x4e000717,
+ 0x1b80, 0x53020725,
+ 0x1b80, 0x53020727,
+ 0x1b80, 0x52010735,
+ 0x1b80, 0x52010737,
+ 0x1b80, 0xe2e70745,
+ 0x1b80, 0xe2e70747,
+ 0x1b80, 0x4d080755,
+ 0x1b80, 0x4d080757,
+ 0x1b80, 0x57100765,
+ 0x1b80, 0x57100767,
+ 0x1b80, 0x57000775,
+ 0x1b80, 0x57000777,
+ 0x1b80, 0x4d000785,
+ 0x1b80, 0x4d000787,
+ 0x1b80, 0x00010795,
+ 0x1b80, 0x00010797,
+ 0x1b80, 0xe2eb07a5,
+ 0x1b80, 0xe2eb07a7,
+ 0x1b80, 0x000107b5,
+ 0x1b80, 0x000107b7,
+ 0x1b80, 0x620607c5,
+ 0x1b80, 0x620607c7,
+ 0x1b80, 0xe2eb07d5,
+ 0x1b80, 0xe2eb07d7,
+ 0x1b80, 0x000107e5,
+ 0x1b80, 0x000107e7,
+ 0x1b80, 0x620607f5,
+ 0x1b80, 0x620607f7,
+ 0x1b80, 0x30ad0805,
+ 0x1b80, 0x30ad0807,
+ 0x1b80, 0x00260815,
+ 0x1b80, 0x00260817,
+ 0x1b80, 0xe3450825,
+ 0x1b80, 0xe3450827,
+ 0x1b80, 0x00020835,
+ 0x1b80, 0x00020837,
+ 0x1b80, 0x54ec0845,
+ 0x1b80, 0x54ec0847,
+ 0x1b80, 0x0ba60855,
+ 0x1b80, 0x0ba60857,
+ 0x1b80, 0x00260865,
+ 0x1b80, 0x00260867,
+ 0x1b80, 0xe3450875,
+ 0x1b80, 0xe3450877,
+ 0x1b80, 0x00020885,
+ 0x1b80, 0x00020887,
+ 0x1b80, 0x63c30895,
+ 0x1b80, 0x63c30897,
+ 0x1b80, 0x30d908a5,
+ 0x1b80, 0x30d908a7,
+ 0x1b80, 0x620608b5,
+ 0x1b80, 0x620608b7,
+ 0x1b80, 0x30a508c5,
+ 0x1b80, 0x30a508c7,
+ 0x1b80, 0x002408d5,
+ 0x1b80, 0x002408d7,
+ 0x1b80, 0xe34508e5,
+ 0x1b80, 0xe34508e7,
+ 0x1b80, 0x000208f5,
+ 0x1b80, 0x000208f7,
+ 0x1b80, 0x54ea0905,
+ 0x1b80, 0x54ea0907,
+ 0x1b80, 0x0ba60915,
+ 0x1b80, 0x0ba60917,
+ 0x1b80, 0x00240925,
+ 0x1b80, 0x00240927,
+ 0x1b80, 0xe3450935,
+ 0x1b80, 0xe3450937,
+ 0x1b80, 0x00020945,
+ 0x1b80, 0x00020947,
+ 0x1b80, 0x63c30955,
+ 0x1b80, 0x63c30957,
+ 0x1b80, 0x30d90965,
+ 0x1b80, 0x30d90967,
+ 0x1b80, 0x62060975,
+ 0x1b80, 0x62060977,
+ 0x1b80, 0x6c100985,
+ 0x1b80, 0x6c100987,
+ 0x1b80, 0x6d0f0995,
+ 0x1b80, 0x6d0f0997,
+ 0x1b80, 0xe2eb09a5,
+ 0x1b80, 0xe2eb09a7,
+ 0x1b80, 0xe34509b5,
+ 0x1b80, 0xe34509b7,
+ 0x1b80, 0x6c2409c5,
+ 0x1b80, 0x6c2409c7,
+ 0x1b80, 0xe2eb09d5,
+ 0x1b80, 0xe2eb09d7,
+ 0x1b80, 0xe34509e5,
+ 0x1b80, 0xe34509e7,
+ 0x1b80, 0x6c4409f5,
+ 0x1b80, 0x6c4409f7,
+ 0x1b80, 0xe2eb0a05,
+ 0x1b80, 0xe2eb0a07,
+ 0x1b80, 0xe3450a15,
+ 0x1b80, 0xe3450a17,
+ 0x1b80, 0x6c640a25,
+ 0x1b80, 0x6c640a27,
+ 0x1b80, 0xe2eb0a35,
+ 0x1b80, 0xe2eb0a37,
+ 0x1b80, 0xe3450a45,
+ 0x1b80, 0xe3450a47,
+ 0x1b80, 0x0baa0a55,
+ 0x1b80, 0x0baa0a57,
+ 0x1b80, 0x6c840a65,
+ 0x1b80, 0x6c840a67,
+ 0x1b80, 0x6d0f0a75,
+ 0x1b80, 0x6d0f0a77,
+ 0x1b80, 0xe2eb0a85,
+ 0x1b80, 0xe2eb0a87,
+ 0x1b80, 0xe3450a95,
+ 0x1b80, 0xe3450a97,
+ 0x1b80, 0x6ca40aa5,
+ 0x1b80, 0x6ca40aa7,
+ 0x1b80, 0xe2eb0ab5,
+ 0x1b80, 0xe2eb0ab7,
+ 0x1b80, 0xe3450ac5,
+ 0x1b80, 0xe3450ac7,
+ 0x1b80, 0x0bac0ad5,
+ 0x1b80, 0x0bac0ad7,
+ 0x1b80, 0x6cc40ae5,
+ 0x1b80, 0x6cc40ae7,
+ 0x1b80, 0x6d0f0af5,
+ 0x1b80, 0x6d0f0af7,
+ 0x1b80, 0xe2eb0b05,
+ 0x1b80, 0xe2eb0b07,
+ 0x1b80, 0xe3450b15,
+ 0x1b80, 0xe3450b17,
+ 0x1b80, 0x6ce40b25,
+ 0x1b80, 0x6ce40b27,
+ 0x1b80, 0xe2eb0b35,
+ 0x1b80, 0xe2eb0b37,
+ 0x1b80, 0xe3450b45,
+ 0x1b80, 0xe3450b47,
+ 0x1b80, 0x6cf40b55,
+ 0x1b80, 0x6cf40b57,
+ 0x1b80, 0xe2eb0b65,
+ 0x1b80, 0xe2eb0b67,
+ 0x1b80, 0xe3450b75,
+ 0x1b80, 0xe3450b77,
+ 0x1b80, 0x6c0c0b85,
+ 0x1b80, 0x6c0c0b87,
+ 0x1b80, 0x6d000b95,
+ 0x1b80, 0x6d000b97,
+ 0x1b80, 0xe2eb0ba5,
+ 0x1b80, 0xe2eb0ba7,
+ 0x1b80, 0xe3450bb5,
+ 0x1b80, 0xe3450bb7,
+ 0x1b80, 0x6c1c0bc5,
+ 0x1b80, 0x6c1c0bc7,
+ 0x1b80, 0xe2eb0bd5,
+ 0x1b80, 0xe2eb0bd7,
+ 0x1b80, 0xe3450be5,
+ 0x1b80, 0xe3450be7,
+ 0x1b80, 0x6c3c0bf5,
+ 0x1b80, 0x6c3c0bf7,
+ 0x1b80, 0xe2eb0c05,
+ 0x1b80, 0xe2eb0c07,
+ 0x1b80, 0xe3450c15,
+ 0x1b80, 0xe3450c17,
+ 0x1b80, 0xf4bf0c25,
+ 0x1b80, 0xf4bf0c27,
+ 0x1b80, 0xf7be0c35,
+ 0x1b80, 0xf7be0c37,
+ 0x1b80, 0x6c5c0c45,
+ 0x1b80, 0x6c5c0c47,
+ 0x1b80, 0xe2eb0c55,
+ 0x1b80, 0xe2eb0c57,
+ 0x1b80, 0xe3450c65,
+ 0x1b80, 0xe3450c67,
+ 0x1b80, 0x6c7c0c75,
+ 0x1b80, 0x6c7c0c77,
+ 0x1b80, 0xe2eb0c85,
+ 0x1b80, 0xe2eb0c87,
+ 0x1b80, 0xe3450c95,
+ 0x1b80, 0xe3450c97,
+ 0x1b80, 0xf5c30ca5,
+ 0x1b80, 0xf5c30ca7,
+ 0x1b80, 0xf8c20cb5,
+ 0x1b80, 0xf8c20cb7,
+ 0x1b80, 0x6c9c0cc5,
+ 0x1b80, 0x6c9c0cc7,
+ 0x1b80, 0xe2eb0cd5,
+ 0x1b80, 0xe2eb0cd7,
+ 0x1b80, 0xe3450ce5,
+ 0x1b80, 0xe3450ce7,
+ 0x1b80, 0x6cbc0cf5,
+ 0x1b80, 0x6cbc0cf7,
+ 0x1b80, 0xe2eb0d05,
+ 0x1b80, 0xe2eb0d07,
+ 0x1b80, 0xe3450d15,
+ 0x1b80, 0xe3450d17,
+ 0x1b80, 0x6cdc0d25,
+ 0x1b80, 0x6cdc0d27,
+ 0x1b80, 0xe2eb0d35,
+ 0x1b80, 0xe2eb0d37,
+ 0x1b80, 0xe3450d45,
+ 0x1b80, 0xe3450d47,
+ 0x1b80, 0x6cf00d55,
+ 0x1b80, 0x6cf00d57,
+ 0x1b80, 0xe2eb0d65,
+ 0x1b80, 0xe2eb0d67,
+ 0x1b80, 0xe3450d75,
+ 0x1b80, 0xe3450d77,
+ 0x1b80, 0x63c30d85,
+ 0x1b80, 0x63c30d87,
+ 0x1b80, 0x55010d95,
+ 0x1b80, 0x55010d97,
+ 0x1b80, 0x57040da5,
+ 0x1b80, 0x57040da7,
+ 0x1b80, 0x57000db5,
+ 0x1b80, 0x57000db7,
+ 0x1b80, 0x96000dc5,
+ 0x1b80, 0x96000dc7,
+ 0x1b80, 0x57080dd5,
+ 0x1b80, 0x57080dd7,
+ 0x1b80, 0x57000de5,
+ 0x1b80, 0x57000de7,
+ 0x1b80, 0x95000df5,
+ 0x1b80, 0x95000df7,
+ 0x1b80, 0x4d000e05,
+ 0x1b80, 0x4d000e07,
+ 0x1b80, 0x63050e15,
+ 0x1b80, 0x63050e17,
+ 0x1b80, 0x7b400e25,
+ 0x1b80, 0x7b400e27,
+ 0x1b80, 0x7a000e35,
+ 0x1b80, 0x7a000e37,
+ 0x1b80, 0x79000e45,
+ 0x1b80, 0x79000e47,
+ 0x1b80, 0x7f400e55,
+ 0x1b80, 0x7f400e57,
+ 0x1b80, 0x7e000e65,
+ 0x1b80, 0x7e000e67,
+ 0x1b80, 0x7d000e75,
+ 0x1b80, 0x7d000e77,
+ 0x1b80, 0x00010e85,
+ 0x1b80, 0x00010e87,
+ 0x1b80, 0xe3170e95,
+ 0x1b80, 0xe3170e97,
+ 0x1b80, 0x00010ea5,
+ 0x1b80, 0x00010ea7,
+ 0x1b80, 0x5c320eb5,
+ 0x1b80, 0x5c320eb7,
+ 0x1b80, 0xe3410ec5,
+ 0x1b80, 0xe3410ec7,
+ 0x1b80, 0xe3170ed5,
+ 0x1b80, 0xe3170ed7,
+ 0x1b80, 0x00010ee5,
+ 0x1b80, 0x00010ee7,
+ 0x1b80, 0x31260ef5,
+ 0x1b80, 0x31260ef7,
+ 0x1b80, 0x00260f05,
+ 0x1b80, 0x00260f07,
+ 0x1b80, 0xe34a0f15,
+ 0x1b80, 0xe34a0f17,
+ 0x1b80, 0x00020f25,
+ 0x1b80, 0x00020f27,
+ 0x1b80, 0x54ec0f35,
+ 0x1b80, 0x54ec0f37,
+ 0x1b80, 0x0ba60f45,
+ 0x1b80, 0x0ba60f47,
+ 0x1b80, 0x00260f55,
+ 0x1b80, 0x00260f57,
+ 0x1b80, 0xe34a0f65,
+ 0x1b80, 0xe34a0f67,
+ 0x1b80, 0x00020f75,
+ 0x1b80, 0x00020f77,
+ 0x1b80, 0x63830f85,
+ 0x1b80, 0x63830f87,
+ 0x1b80, 0x30d90f95,
+ 0x1b80, 0x30d90f97,
+ 0x1b80, 0x311a0fa5,
+ 0x1b80, 0x311a0fa7,
+ 0x1b80, 0x00240fb5,
+ 0x1b80, 0x00240fb7,
+ 0x1b80, 0xe34a0fc5,
+ 0x1b80, 0xe34a0fc7,
+ 0x1b80, 0x00020fd5,
+ 0x1b80, 0x00020fd7,
+ 0x1b80, 0x54ea0fe5,
+ 0x1b80, 0x54ea0fe7,
+ 0x1b80, 0x0ba60ff5,
+ 0x1b80, 0x0ba60ff7,
+ 0x1b80, 0x00241005,
+ 0x1b80, 0x00241007,
+ 0x1b80, 0xe34a1015,
+ 0x1b80, 0xe34a1017,
+ 0x1b80, 0x00021025,
+ 0x1b80, 0x00021027,
+ 0x1b80, 0x63831035,
+ 0x1b80, 0x63831037,
+ 0x1b80, 0x30d91045,
+ 0x1b80, 0x30d91047,
+ 0x1b80, 0x5c321055,
+ 0x1b80, 0x5c321057,
+ 0x1b80, 0x54e61065,
+ 0x1b80, 0x54e61067,
+ 0x1b80, 0x6e101075,
+ 0x1b80, 0x6e101077,
+ 0x1b80, 0x6f0f1085,
+ 0x1b80, 0x6f0f1087,
+ 0x1b80, 0xe3171095,
+ 0x1b80, 0xe3171097,
+ 0x1b80, 0xe34a10a5,
+ 0x1b80, 0xe34a10a7,
+ 0x1b80, 0x5c3210b5,
+ 0x1b80, 0x5c3210b7,
+ 0x1b80, 0x54e710c5,
+ 0x1b80, 0x54e710c7,
+ 0x1b80, 0x6e2410d5,
+ 0x1b80, 0x6e2410d7,
+ 0x1b80, 0xe31710e5,
+ 0x1b80, 0xe31710e7,
+ 0x1b80, 0xe34a10f5,
+ 0x1b80, 0xe34a10f7,
+ 0x1b80, 0x5c321105,
+ 0x1b80, 0x5c321107,
+ 0x1b80, 0x54e81115,
+ 0x1b80, 0x54e81117,
+ 0x1b80, 0x6e441125,
+ 0x1b80, 0x6e441127,
+ 0x1b80, 0xe3171135,
+ 0x1b80, 0xe3171137,
+ 0x1b80, 0xe34a1145,
+ 0x1b80, 0xe34a1147,
+ 0x1b80, 0x5c321155,
+ 0x1b80, 0x5c321157,
+ 0x1b80, 0x54e91165,
+ 0x1b80, 0x54e91167,
+ 0x1b80, 0x6e641175,
+ 0x1b80, 0x6e641177,
+ 0x1b80, 0xe3171185,
+ 0x1b80, 0xe3171187,
+ 0x1b80, 0xe34a1195,
+ 0x1b80, 0xe34a1197,
+ 0x1b80, 0x5c3211a5,
+ 0x1b80, 0x5c3211a7,
+ 0x1b80, 0x54ea11b5,
+ 0x1b80, 0x54ea11b7,
+ 0x1b80, 0x0baa11c5,
+ 0x1b80, 0x0baa11c7,
+ 0x1b80, 0x6e8411d5,
+ 0x1b80, 0x6e8411d7,
+ 0x1b80, 0x6f0f11e5,
+ 0x1b80, 0x6f0f11e7,
+ 0x1b80, 0xe31711f5,
+ 0x1b80, 0xe31711f7,
+ 0x1b80, 0xe34a1205,
+ 0x1b80, 0xe34a1207,
+ 0x1b80, 0x5c321215,
+ 0x1b80, 0x5c321217,
+ 0x1b80, 0x54eb1225,
+ 0x1b80, 0x54eb1227,
+ 0x1b80, 0x6ea41235,
+ 0x1b80, 0x6ea41237,
+ 0x1b80, 0xe3171245,
+ 0x1b80, 0xe3171247,
+ 0x1b80, 0xe34a1255,
+ 0x1b80, 0xe34a1257,
+ 0x1b80, 0x5c321265,
+ 0x1b80, 0x5c321267,
+ 0x1b80, 0x54ec1275,
+ 0x1b80, 0x54ec1277,
+ 0x1b80, 0x0bac1285,
+ 0x1b80, 0x0bac1287,
+ 0x1b80, 0x6ec41295,
+ 0x1b80, 0x6ec41297,
+ 0x1b80, 0x6f0f12a5,
+ 0x1b80, 0x6f0f12a7,
+ 0x1b80, 0xe31712b5,
+ 0x1b80, 0xe31712b7,
+ 0x1b80, 0xe34a12c5,
+ 0x1b80, 0xe34a12c7,
+ 0x1b80, 0x5c3212d5,
+ 0x1b80, 0x5c3212d7,
+ 0x1b80, 0x54ed12e5,
+ 0x1b80, 0x54ed12e7,
+ 0x1b80, 0x6ee412f5,
+ 0x1b80, 0x6ee412f7,
+ 0x1b80, 0xe3171305,
+ 0x1b80, 0xe3171307,
+ 0x1b80, 0xe34a1315,
+ 0x1b80, 0xe34a1317,
+ 0x1b80, 0x5c321325,
+ 0x1b80, 0x5c321327,
+ 0x1b80, 0x54ee1335,
+ 0x1b80, 0x54ee1337,
+ 0x1b80, 0x6ef41345,
+ 0x1b80, 0x6ef41347,
+ 0x1b80, 0xe3171355,
+ 0x1b80, 0xe3171357,
+ 0x1b80, 0xe34a1365,
+ 0x1b80, 0xe34a1367,
+ 0x1b80, 0x5c321375,
+ 0x1b80, 0x5c321377,
+ 0x1b80, 0x54ef1385,
+ 0x1b80, 0x54ef1387,
+ 0x1b80, 0x6e0c1395,
+ 0x1b80, 0x6e0c1397,
+ 0x1b80, 0x6f0013a5,
+ 0x1b80, 0x6f0013a7,
+ 0x1b80, 0xe31713b5,
+ 0x1b80, 0xe31713b7,
+ 0x1b80, 0xe34a13c5,
+ 0x1b80, 0xe34a13c7,
+ 0x1b80, 0x5c3213d5,
+ 0x1b80, 0x5c3213d7,
+ 0x1b80, 0x54f013e5,
+ 0x1b80, 0x54f013e7,
+ 0x1b80, 0x6e1c13f5,
+ 0x1b80, 0x6e1c13f7,
+ 0x1b80, 0xe3171405,
+ 0x1b80, 0xe3171407,
+ 0x1b80, 0xe34a1415,
+ 0x1b80, 0xe34a1417,
+ 0x1b80, 0x5c321425,
+ 0x1b80, 0x5c321427,
+ 0x1b80, 0x54f11435,
+ 0x1b80, 0x54f11437,
+ 0x1b80, 0x6e3c1445,
+ 0x1b80, 0x6e3c1447,
+ 0x1b80, 0xe3171455,
+ 0x1b80, 0xe3171457,
+ 0x1b80, 0xe34a1465,
+ 0x1b80, 0xe34a1467,
+ 0x1b80, 0xfaa91475,
+ 0x1b80, 0xfaa91477,
+ 0x1b80, 0x5c321485,
+ 0x1b80, 0x5c321487,
+ 0x1b80, 0x54f21495,
+ 0x1b80, 0x54f21497,
+ 0x1b80, 0x6e5c14a5,
+ 0x1b80, 0x6e5c14a7,
+ 0x1b80, 0xe31714b5,
+ 0x1b80, 0xe31714b7,
+ 0x1b80, 0xe34a14c5,
+ 0x1b80, 0xe34a14c7,
+ 0x1b80, 0x5c3214d5,
+ 0x1b80, 0x5c3214d7,
+ 0x1b80, 0x54f314e5,
+ 0x1b80, 0x54f314e7,
+ 0x1b80, 0x6e7c14f5,
+ 0x1b80, 0x6e7c14f7,
+ 0x1b80, 0xe3171505,
+ 0x1b80, 0xe3171507,
+ 0x1b80, 0xe34a1515,
+ 0x1b80, 0xe34a1517,
+ 0x1b80, 0xfba91525,
+ 0x1b80, 0xfba91527,
+ 0x1b80, 0x5c321535,
+ 0x1b80, 0x5c321537,
+ 0x1b80, 0x54f41545,
+ 0x1b80, 0x54f41547,
+ 0x1b80, 0x6e9c1555,
+ 0x1b80, 0x6e9c1557,
+ 0x1b80, 0xe3171565,
+ 0x1b80, 0xe3171567,
+ 0x1b80, 0xe34a1575,
+ 0x1b80, 0xe34a1577,
+ 0x1b80, 0x5c321585,
+ 0x1b80, 0x5c321587,
+ 0x1b80, 0x54f51595,
+ 0x1b80, 0x54f51597,
+ 0x1b80, 0x6ebc15a5,
+ 0x1b80, 0x6ebc15a7,
+ 0x1b80, 0xe31715b5,
+ 0x1b80, 0xe31715b7,
+ 0x1b80, 0xe34a15c5,
+ 0x1b80, 0xe34a15c7,
+ 0x1b80, 0x5c3215d5,
+ 0x1b80, 0x5c3215d7,
+ 0x1b80, 0x54f615e5,
+ 0x1b80, 0x54f615e7,
+ 0x1b80, 0x6edc15f5,
+ 0x1b80, 0x6edc15f7,
+ 0x1b80, 0xe3171605,
+ 0x1b80, 0xe3171607,
+ 0x1b80, 0xe34a1615,
+ 0x1b80, 0xe34a1617,
+ 0x1b80, 0x5c321625,
+ 0x1b80, 0x5c321627,
+ 0x1b80, 0x54f71635,
+ 0x1b80, 0x54f71637,
+ 0x1b80, 0x6ef01645,
+ 0x1b80, 0x6ef01647,
+ 0x1b80, 0xe3171655,
+ 0x1b80, 0xe3171657,
+ 0x1b80, 0xe34a1665,
+ 0x1b80, 0xe34a1667,
+ 0x1b80, 0x63831675,
+ 0x1b80, 0x63831677,
+ 0x1b80, 0x30d91685,
+ 0x1b80, 0x30d91687,
+ 0x1b80, 0x00011695,
+ 0x1b80, 0x00011697,
+ 0x1b80, 0x000416a5,
+ 0x1b80, 0x000416a7,
+ 0x1b80, 0x550116b5,
+ 0x1b80, 0x550116b7,
+ 0x1b80, 0x5c3116c5,
+ 0x1b80, 0x5c3116c7,
+ 0x1b80, 0x5f8216d5,
+ 0x1b80, 0x5f8216d7,
+ 0x1b80, 0x660516e5,
+ 0x1b80, 0x660516e7,
+ 0x1b80, 0x000616f5,
+ 0x1b80, 0x000616f7,
+ 0x1b80, 0x5d801705,
+ 0x1b80, 0x5d801707,
+ 0x1b80, 0x09001715,
+ 0x1b80, 0x09001717,
+ 0x1b80, 0x0a011725,
+ 0x1b80, 0x0a011727,
+ 0x1b80, 0x0b401735,
+ 0x1b80, 0x0b401737,
+ 0x1b80, 0x0d001745,
+ 0x1b80, 0x0d001747,
+ 0x1b80, 0x0f011755,
+ 0x1b80, 0x0f011757,
+ 0x1b80, 0x002a1765,
+ 0x1b80, 0x002a1767,
+ 0x1b80, 0x055a1775,
+ 0x1b80, 0x055a1777,
+ 0x1b80, 0x05db1785,
+ 0x1b80, 0x05db1787,
+ 0x1b80, 0xe3351795,
+ 0x1b80, 0xe3351797,
+ 0x1b80, 0xe2e317a5,
+ 0x1b80, 0xe2e317a7,
+ 0x1b80, 0x000617b5,
+ 0x1b80, 0x000617b7,
+ 0x1b80, 0x06da17c5,
+ 0x1b80, 0x06da17c7,
+ 0x1b80, 0x07db17d5,
+ 0x1b80, 0x07db17d7,
+ 0x1b80, 0xe33517e5,
+ 0x1b80, 0xe33517e7,
+ 0x1b80, 0xe2e317f5,
+ 0x1b80, 0xe2e317f7,
+ 0x1b80, 0xe32c1805,
+ 0x1b80, 0xe32c1807,
+ 0x1b80, 0x00021815,
+ 0x1b80, 0x00021817,
+ 0x1b80, 0xe3311825,
+ 0x1b80, 0xe3311827,
+ 0x1b80, 0x5d001835,
+ 0x1b80, 0x5d001837,
+ 0x1b80, 0x00041845,
+ 0x1b80, 0x00041847,
+ 0x1b80, 0x5fa21855,
+ 0x1b80, 0x5fa21857,
+ 0x1b80, 0x00011865,
+ 0x1b80, 0x00011867,
+ 0x1b80, 0xe2571875,
+ 0x1b80, 0xe2571877,
+ 0x1b80, 0x74081885,
+ 0x1b80, 0x74081887,
+ 0x1b80, 0xe2a11895,
+ 0x1b80, 0xe2a11897,
+ 0x1b80, 0xe28318a5,
+ 0x1b80, 0xe28318a7,
+ 0x1b80, 0xe2c118b5,
+ 0x1b80, 0xe2c118b7,
+ 0x1b80, 0xb90018c5,
+ 0x1b80, 0xb90018c7,
+ 0x1b80, 0x990018d5,
+ 0x1b80, 0x990018d7,
+ 0x1b80, 0x000618e5,
+ 0x1b80, 0x000618e7,
+ 0x1b80, 0x770018f5,
+ 0x1b80, 0x770018f7,
+ 0x1b80, 0x00041905,
+ 0x1b80, 0x00041907,
+ 0x1b80, 0x49041915,
+ 0x1b80, 0x49041917,
+ 0x1b80, 0x4bb01925,
+ 0x1b80, 0x4bb01927,
+ 0x1b80, 0x00061935,
+ 0x1b80, 0x00061937,
+ 0x1b80, 0x75041945,
+ 0x1b80, 0x75041947,
+ 0x1b80, 0x77081955,
+ 0x1b80, 0x77081957,
+ 0x1b80, 0x00071965,
+ 0x1b80, 0x00071967,
+ 0x1b80, 0x77101975,
+ 0x1b80, 0x77101977,
+ 0x1b80, 0x00041985,
+ 0x1b80, 0x00041987,
+ 0x1b80, 0x44801995,
+ 0x1b80, 0x44801997,
+ 0x1b80, 0x45ff19a5,
+ 0x1b80, 0x45ff19a7,
+ 0x1b80, 0x463f19b5,
+ 0x1b80, 0x463f19b7,
+ 0x1b80, 0x473119c5,
+ 0x1b80, 0x473119c7,
+ 0x1b80, 0x400819d5,
+ 0x1b80, 0x400819d7,
+ 0x1b80, 0xe23e19e5,
+ 0x1b80, 0xe23e19e7,
+ 0x1b80, 0x000119f5,
+ 0x1b80, 0x000119f7,
+ 0x1b80, 0xe2571a05,
+ 0x1b80, 0xe2571a07,
+ 0x1b80, 0x74081a15,
+ 0x1b80, 0x74081a17,
+ 0x1b80, 0xe2b11a25,
+ 0x1b80, 0xe2b11a27,
+ 0x1b80, 0xe2831a35,
+ 0x1b80, 0xe2831a37,
+ 0x1b80, 0xe2c71a45,
+ 0x1b80, 0xe2c71a47,
+ 0x1b80, 0xb9001a55,
+ 0x1b80, 0xb9001a57,
+ 0x1b80, 0x99001a65,
+ 0x1b80, 0x99001a67,
+ 0x1b80, 0x00061a75,
+ 0x1b80, 0x00061a77,
+ 0x1b80, 0x77001a85,
+ 0x1b80, 0x77001a87,
+ 0x1b80, 0x00051a95,
+ 0x1b80, 0x00051a97,
+ 0x1b80, 0x61041aa5,
+ 0x1b80, 0x61041aa7,
+ 0x1b80, 0x63b01ab5,
+ 0x1b80, 0x63b01ab7,
+ 0x1b80, 0x00061ac5,
+ 0x1b80, 0x00061ac7,
+ 0x1b80, 0x75081ad5,
+ 0x1b80, 0x75081ad7,
+ 0x1b80, 0x77081ae5,
+ 0x1b80, 0x77081ae7,
+ 0x1b80, 0x00071af5,
+ 0x1b80, 0x00071af7,
+ 0x1b80, 0x77201b05,
+ 0x1b80, 0x77201b07,
+ 0x1b80, 0x00051b15,
+ 0x1b80, 0x00051b17,
+ 0x1b80, 0x5c801b25,
+ 0x1b80, 0x5c801b27,
+ 0x1b80, 0x5dff1b35,
+ 0x1b80, 0x5dff1b37,
+ 0x1b80, 0x5e3f1b45,
+ 0x1b80, 0x5e3f1b47,
+ 0x1b80, 0x5f311b55,
+ 0x1b80, 0x5f311b57,
+ 0x1b80, 0x00041b65,
+ 0x1b80, 0x00041b67,
+ 0x1b80, 0x400a1b75,
+ 0x1b80, 0x400a1b77,
+ 0x1b80, 0xe23e1b85,
+ 0x1b80, 0xe23e1b87,
+ 0x1b80, 0x00011b95,
+ 0x1b80, 0x00011b97,
+ 0x1b80, 0xe2571ba5,
+ 0x1b80, 0xe2571ba7,
+ 0x1b80, 0x74081bb5,
+ 0x1b80, 0x74081bb7,
+ 0x1b80, 0xe2a11bc5,
+ 0x1b80, 0xe2a11bc7,
+ 0x1b80, 0xe2831bd5,
+ 0x1b80, 0xe2831bd7,
+ 0x1b80, 0xe2c11be5,
+ 0x1b80, 0xe2c11be7,
+ 0x1b80, 0xe2cd1bf5,
+ 0x1b80, 0xe2cd1bf7,
+ 0x1b80, 0xe2101c05,
+ 0x1b80, 0xe2101c07,
+ 0x1b80, 0x00011c15,
+ 0x1b80, 0x00011c17,
+ 0x1b80, 0xe2571c25,
+ 0x1b80, 0xe2571c27,
+ 0x1b80, 0x74081c35,
+ 0x1b80, 0x74081c37,
+ 0x1b80, 0xe2b11c45,
+ 0x1b80, 0xe2b11c47,
+ 0x1b80, 0xe2831c55,
+ 0x1b80, 0xe2831c57,
+ 0x1b80, 0xe2c71c65,
+ 0x1b80, 0xe2c71c67,
+ 0x1b80, 0xe2cd1c75,
+ 0x1b80, 0xe2cd1c77,
+ 0x1b80, 0xe2261c85,
+ 0x1b80, 0xe2261c87,
+ 0x1b80, 0x00011c95,
+ 0x1b80, 0x00011c97,
+ 0x1b80, 0xe26d1ca5,
+ 0x1b80, 0xe26d1ca7,
+ 0x1b80, 0x74001cb5,
+ 0x1b80, 0x74001cb7,
+ 0x1b80, 0xe2a11cc5,
+ 0x1b80, 0xe2a11cc7,
+ 0x1b80, 0xe2921cd5,
+ 0x1b80, 0xe2921cd7,
+ 0x1b80, 0xe2c11ce5,
+ 0x1b80, 0xe2c11ce7,
+ 0x1b80, 0xe2cd1cf5,
+ 0x1b80, 0xe2cd1cf7,
+ 0x1b80, 0xe2101d05,
+ 0x1b80, 0xe2101d07,
+ 0x1b80, 0x00011d15,
+ 0x1b80, 0x00011d17,
+ 0x1b80, 0xe26d1d25,
+ 0x1b80, 0xe26d1d27,
+ 0x1b80, 0x74001d35,
+ 0x1b80, 0x74001d37,
+ 0x1b80, 0xe2b11d45,
+ 0x1b80, 0xe2b11d47,
+ 0x1b80, 0xe2921d55,
+ 0x1b80, 0xe2921d57,
+ 0x1b80, 0xe2c71d65,
+ 0x1b80, 0xe2c71d67,
+ 0x1b80, 0xe2cd1d75,
+ 0x1b80, 0xe2cd1d77,
+ 0x1b80, 0xe2261d85,
+ 0x1b80, 0xe2261d87,
+ 0x1b80, 0x00011d95,
+ 0x1b80, 0x00011d97,
+ 0x1b80, 0x00041da5,
+ 0x1b80, 0x00041da7,
+ 0x1b80, 0x445b1db5,
+ 0x1b80, 0x445b1db7,
+ 0x1b80, 0x47b01dc5,
+ 0x1b80, 0x47b01dc7,
+ 0x1b80, 0x47301dd5,
+ 0x1b80, 0x47301dd7,
+ 0x1b80, 0x47001de5,
+ 0x1b80, 0x47001de7,
+ 0x1b80, 0x00061df5,
+ 0x1b80, 0x00061df7,
+ 0x1b80, 0x77081e05,
+ 0x1b80, 0x77081e07,
+ 0x1b80, 0x00041e15,
+ 0x1b80, 0x00041e17,
+ 0x1b80, 0x49401e25,
+ 0x1b80, 0x49401e27,
+ 0x1b80, 0x4bb01e35,
+ 0x1b80, 0x4bb01e37,
+ 0x1b80, 0x00071e45,
+ 0x1b80, 0x00071e47,
+ 0x1b80, 0x54401e55,
+ 0x1b80, 0x54401e57,
+ 0x1b80, 0x00041e65,
+ 0x1b80, 0x00041e67,
+ 0x1b80, 0x40081e75,
+ 0x1b80, 0x40081e77,
+ 0x1b80, 0x00011e85,
+ 0x1b80, 0x00011e87,
+ 0x1b80, 0x00051e95,
+ 0x1b80, 0x00051e97,
+ 0x1b80, 0x5c5b1ea5,
+ 0x1b80, 0x5c5b1ea7,
+ 0x1b80, 0x5fb01eb5,
+ 0x1b80, 0x5fb01eb7,
+ 0x1b80, 0x5f301ec5,
+ 0x1b80, 0x5f301ec7,
+ 0x1b80, 0x5f001ed5,
+ 0x1b80, 0x5f001ed7,
+ 0x1b80, 0x00061ee5,
+ 0x1b80, 0x00061ee7,
+ 0x1b80, 0x77081ef5,
+ 0x1b80, 0x77081ef7,
+ 0x1b80, 0x00051f05,
+ 0x1b80, 0x00051f07,
+ 0x1b80, 0x61401f15,
+ 0x1b80, 0x61401f17,
+ 0x1b80, 0x63b01f25,
+ 0x1b80, 0x63b01f27,
+ 0x1b80, 0x00071f35,
+ 0x1b80, 0x00071f37,
+ 0x1b80, 0x54401f45,
+ 0x1b80, 0x54401f47,
+ 0x1b80, 0x00041f55,
+ 0x1b80, 0x00041f57,
+ 0x1b80, 0x40081f65,
+ 0x1b80, 0x40081f67,
+ 0x1b80, 0x00011f75,
+ 0x1b80, 0x00011f77,
+ 0x1b80, 0xe2571f85,
+ 0x1b80, 0xe2571f87,
+ 0x1b80, 0x74081f95,
+ 0x1b80, 0x74081f97,
+ 0x1b80, 0xe2a11fa5,
+ 0x1b80, 0xe2a11fa7,
+ 0x1b80, 0x00041fb5,
+ 0x1b80, 0x00041fb7,
+ 0x1b80, 0x40081fc5,
+ 0x1b80, 0x40081fc7,
+ 0x1b80, 0x00011fd5,
+ 0x1b80, 0x00011fd7,
+ 0x1b80, 0xe2571fe5,
+ 0x1b80, 0xe2571fe7,
+ 0x1b80, 0x74081ff5,
+ 0x1b80, 0x74081ff7,
+ 0x1b80, 0xe2b12005,
+ 0x1b80, 0xe2b12007,
+ 0x1b80, 0x00042015,
+ 0x1b80, 0x00042017,
+ 0x1b80, 0x40082025,
+ 0x1b80, 0x40082027,
+ 0x1b80, 0x00012035,
+ 0x1b80, 0x00012037,
+ 0x1b80, 0xe26d2045,
+ 0x1b80, 0xe26d2047,
+ 0x1b80, 0x74002055,
+ 0x1b80, 0x74002057,
+ 0x1b80, 0xe2a12065,
+ 0x1b80, 0xe2a12067,
+ 0x1b80, 0x00042075,
+ 0x1b80, 0x00042077,
+ 0x1b80, 0x40082085,
+ 0x1b80, 0x40082087,
+ 0x1b80, 0x00012095,
+ 0x1b80, 0x00012097,
+ 0x1b80, 0xe26d20a5,
+ 0x1b80, 0xe26d20a7,
+ 0x1b80, 0x740020b5,
+ 0x1b80, 0x740020b7,
+ 0x1b80, 0xe2b120c5,
+ 0x1b80, 0xe2b120c7,
+ 0x1b80, 0x000420d5,
+ 0x1b80, 0x000420d7,
+ 0x1b80, 0x400820e5,
+ 0x1b80, 0x400820e7,
+ 0x1b80, 0x000120f5,
+ 0x1b80, 0x000120f7,
+ 0x1b80, 0x00042105,
+ 0x1b80, 0x00042107,
+ 0x1b80, 0x49042115,
+ 0x1b80, 0x49042117,
+ 0x1b80, 0x4bb02125,
+ 0x1b80, 0x4bb02127,
+ 0x1b80, 0x00062135,
+ 0x1b80, 0x00062137,
+ 0x1b80, 0x75042145,
+ 0x1b80, 0x75042147,
+ 0x1b80, 0x77082155,
+ 0x1b80, 0x77082157,
+ 0x1b80, 0x00042165,
+ 0x1b80, 0x00042167,
+ 0x1b80, 0x44802175,
+ 0x1b80, 0x44802177,
+ 0x1b80, 0x45ff2185,
+ 0x1b80, 0x45ff2187,
+ 0x1b80, 0x463f2195,
+ 0x1b80, 0x463f2197,
+ 0x1b80, 0x473121a5,
+ 0x1b80, 0x473121a7,
+ 0x1b80, 0x400821b5,
+ 0x1b80, 0x400821b7,
+ 0x1b80, 0xe23e21c5,
+ 0x1b80, 0xe23e21c7,
+ 0x1b80, 0x000421d5,
+ 0x1b80, 0x000421d7,
+ 0x1b80, 0x400c21e5,
+ 0x1b80, 0x400c21e7,
+ 0x1b80, 0x000621f5,
+ 0x1b80, 0x000621f7,
+ 0x1b80, 0x75002205,
+ 0x1b80, 0x75002207,
+ 0x1b80, 0x00042215,
+ 0x1b80, 0x00042217,
+ 0x1b80, 0x445b2225,
+ 0x1b80, 0x445b2227,
+ 0x1b80, 0x47002235,
+ 0x1b80, 0x47002237,
+ 0x1b80, 0x40082245,
+ 0x1b80, 0x40082247,
+ 0x1b80, 0x00012255,
+ 0x1b80, 0x00012257,
+ 0x1b80, 0x00052265,
+ 0x1b80, 0x00052267,
+ 0x1b80, 0x61042275,
+ 0x1b80, 0x61042277,
+ 0x1b80, 0x63b02285,
+ 0x1b80, 0x63b02287,
+ 0x1b80, 0x00062295,
+ 0x1b80, 0x00062297,
+ 0x1b80, 0x750822a5,
+ 0x1b80, 0x750822a7,
+ 0x1b80, 0x770822b5,
+ 0x1b80, 0x770822b7,
+ 0x1b80, 0x000522c5,
+ 0x1b80, 0x000522c7,
+ 0x1b80, 0x5c8022d5,
+ 0x1b80, 0x5c8022d7,
+ 0x1b80, 0x5dff22e5,
+ 0x1b80, 0x5dff22e7,
+ 0x1b80, 0x5e3f22f5,
+ 0x1b80, 0x5e3f22f7,
+ 0x1b80, 0x5f312305,
+ 0x1b80, 0x5f312307,
+ 0x1b80, 0x00042315,
+ 0x1b80, 0x00042317,
+ 0x1b80, 0x400a2325,
+ 0x1b80, 0x400a2327,
+ 0x1b80, 0xe23e2335,
+ 0x1b80, 0xe23e2337,
+ 0x1b80, 0x00042345,
+ 0x1b80, 0x00042347,
+ 0x1b80, 0x400c2355,
+ 0x1b80, 0x400c2357,
+ 0x1b80, 0x00062365,
+ 0x1b80, 0x00062367,
+ 0x1b80, 0x75002375,
+ 0x1b80, 0x75002377,
+ 0x1b80, 0x00052385,
+ 0x1b80, 0x00052387,
+ 0x1b80, 0x5c5b2395,
+ 0x1b80, 0x5c5b2397,
+ 0x1b80, 0x5f0023a5,
+ 0x1b80, 0x5f0023a7,
+ 0x1b80, 0x000423b5,
+ 0x1b80, 0x000423b7,
+ 0x1b80, 0x400823c5,
+ 0x1b80, 0x400823c7,
+ 0x1b80, 0x000123d5,
+ 0x1b80, 0x000123d7,
+ 0x1b80, 0x000723e5,
+ 0x1b80, 0x000723e7,
+ 0x1b80, 0x4c1223f5,
+ 0x1b80, 0x4c1223f7,
+ 0x1b80, 0x4e202405,
+ 0x1b80, 0x4e202407,
+ 0x1b80, 0x00052415,
+ 0x1b80, 0x00052417,
+ 0x1b80, 0x598f2425,
+ 0x1b80, 0x598f2427,
+ 0x1b80, 0x40022435,
+ 0x1b80, 0x40022437,
+ 0x1b80, 0x4c012445,
+ 0x1b80, 0x4c012447,
+ 0x1b80, 0x4c002455,
+ 0x1b80, 0x4c002457,
+ 0x1b80, 0xab002465,
+ 0x1b80, 0xab002467,
+ 0x1b80, 0x40032475,
+ 0x1b80, 0x40032477,
+ 0x1b80, 0x49802485,
+ 0x1b80, 0x49802487,
+ 0x1b80, 0x56c02495,
+ 0x1b80, 0x56c02497,
+ 0x1b80, 0x540224a5,
+ 0x1b80, 0x540224a7,
+ 0x1b80, 0x4c0124b5,
+ 0x1b80, 0x4c0124b7,
+ 0x1b80, 0x4c0024c5,
+ 0x1b80, 0x4c0024c7,
+ 0x1b80, 0xab0024d5,
+ 0x1b80, 0xab0024d7,
+ 0x1b80, 0x540024e5,
+ 0x1b80, 0x540024e7,
+ 0x1b80, 0x000724f5,
+ 0x1b80, 0x000724f7,
+ 0x1b80, 0x4c002505,
+ 0x1b80, 0x4c002507,
+ 0x1b80, 0x4e002515,
+ 0x1b80, 0x4e002517,
+ 0x1b80, 0x00052525,
+ 0x1b80, 0x00052527,
+ 0x1b80, 0x40042535,
+ 0x1b80, 0x40042537,
+ 0x1b80, 0x4c012545,
+ 0x1b80, 0x4c012547,
+ 0x1b80, 0x4c002555,
+ 0x1b80, 0x4c002557,
+ 0x1b80, 0x00012565,
+ 0x1b80, 0x00012567,
+ 0x1b80, 0x00042575,
+ 0x1b80, 0x00042577,
+ 0x1b80, 0x44802585,
+ 0x1b80, 0x44802587,
+ 0x1b80, 0x4b002595,
+ 0x1b80, 0x4b002597,
+ 0x1b80, 0x000525a5,
+ 0x1b80, 0x000525a7,
+ 0x1b80, 0x5c8025b5,
+ 0x1b80, 0x5c8025b7,
+ 0x1b80, 0x630025c5,
+ 0x1b80, 0x630025c7,
+ 0x1b80, 0x000725d5,
+ 0x1b80, 0x000725d7,
+ 0x1b80, 0x780c25e5,
+ 0x1b80, 0x780c25e7,
+ 0x1b80, 0x791925f5,
+ 0x1b80, 0x791925f7,
+ 0x1b80, 0x7a002605,
+ 0x1b80, 0x7a002607,
+ 0x1b80, 0x7b822615,
+ 0x1b80, 0x7b822617,
+ 0x1b80, 0x7b022625,
+ 0x1b80, 0x7b022627,
+ 0x1b80, 0x78142635,
+ 0x1b80, 0x78142637,
+ 0x1b80, 0x79ee2645,
+ 0x1b80, 0x79ee2647,
+ 0x1b80, 0x7a012655,
+ 0x1b80, 0x7a012657,
+ 0x1b80, 0x7b832665,
+ 0x1b80, 0x7b832667,
+ 0x1b80, 0x7b032675,
+ 0x1b80, 0x7b032677,
+ 0x1b80, 0x78282685,
+ 0x1b80, 0x78282687,
+ 0x1b80, 0x79b42695,
+ 0x1b80, 0x79b42697,
+ 0x1b80, 0x7a0026a5,
+ 0x1b80, 0x7a0026a7,
+ 0x1b80, 0x7b0026b5,
+ 0x1b80, 0x7b0026b7,
+ 0x1b80, 0x000126c5,
+ 0x1b80, 0x000126c7,
+ 0x1b80, 0x000426d5,
+ 0x1b80, 0x000426d7,
+ 0x1b80, 0x448026e5,
+ 0x1b80, 0x448026e7,
+ 0x1b80, 0x4b0026f5,
+ 0x1b80, 0x4b0026f7,
+ 0x1b80, 0x00052705,
+ 0x1b80, 0x00052707,
+ 0x1b80, 0x5c802715,
+ 0x1b80, 0x5c802717,
+ 0x1b80, 0x63002725,
+ 0x1b80, 0x63002727,
+ 0x1b80, 0x00072735,
+ 0x1b80, 0x00072737,
+ 0x1b80, 0x78102745,
+ 0x1b80, 0x78102747,
+ 0x1b80, 0x79132755,
+ 0x1b80, 0x79132757,
+ 0x1b80, 0x7a002765,
+ 0x1b80, 0x7a002767,
+ 0x1b80, 0x7b802775,
+ 0x1b80, 0x7b802777,
+ 0x1b80, 0x7b002785,
+ 0x1b80, 0x7b002787,
+ 0x1b80, 0x78db2795,
+ 0x1b80, 0x78db2797,
+ 0x1b80, 0x790027a5,
+ 0x1b80, 0x790027a7,
+ 0x1b80, 0x7a0027b5,
+ 0x1b80, 0x7a0027b7,
+ 0x1b80, 0x7b8127c5,
+ 0x1b80, 0x7b8127c7,
+ 0x1b80, 0x7b0127d5,
+ 0x1b80, 0x7b0127d7,
+ 0x1b80, 0x782827e5,
+ 0x1b80, 0x782827e7,
+ 0x1b80, 0x79b427f5,
+ 0x1b80, 0x79b427f7,
+ 0x1b80, 0x7a002805,
+ 0x1b80, 0x7a002807,
+ 0x1b80, 0x7b002815,
+ 0x1b80, 0x7b002817,
+ 0x1b80, 0x00012825,
+ 0x1b80, 0x00012827,
+ 0x1b80, 0x00072835,
+ 0x1b80, 0x00072837,
+ 0x1b80, 0x783e2845,
+ 0x1b80, 0x783e2847,
+ 0x1b80, 0x79f92855,
+ 0x1b80, 0x79f92857,
+ 0x1b80, 0x7a012865,
+ 0x1b80, 0x7a012867,
+ 0x1b80, 0x7b822875,
+ 0x1b80, 0x7b822877,
+ 0x1b80, 0x7b022885,
+ 0x1b80, 0x7b022887,
+ 0x1b80, 0x78a92895,
+ 0x1b80, 0x78a92897,
+ 0x1b80, 0x79ed28a5,
+ 0x1b80, 0x79ed28a7,
+ 0x1b80, 0x7b8328b5,
+ 0x1b80, 0x7b8328b7,
+ 0x1b80, 0x7b0328c5,
+ 0x1b80, 0x7b0328c7,
+ 0x1b80, 0x782828d5,
+ 0x1b80, 0x782828d7,
+ 0x1b80, 0x79b428e5,
+ 0x1b80, 0x79b428e7,
+ 0x1b80, 0x7a0028f5,
+ 0x1b80, 0x7a0028f7,
+ 0x1b80, 0x7b002905,
+ 0x1b80, 0x7b002907,
+ 0x1b80, 0x00012915,
+ 0x1b80, 0x00012917,
+ 0x1b80, 0x00072925,
+ 0x1b80, 0x00072927,
+ 0x1b80, 0x78ae2935,
+ 0x1b80, 0x78ae2937,
+ 0x1b80, 0x79fa2945,
+ 0x1b80, 0x79fa2947,
+ 0x1b80, 0x7a012955,
+ 0x1b80, 0x7a012957,
+ 0x1b80, 0x7b802965,
+ 0x1b80, 0x7b802967,
+ 0x1b80, 0x7b002975,
+ 0x1b80, 0x7b002977,
+ 0x1b80, 0x787a2985,
+ 0x1b80, 0x787a2987,
+ 0x1b80, 0x79f12995,
+ 0x1b80, 0x79f12997,
+ 0x1b80, 0x7b8129a5,
+ 0x1b80, 0x7b8129a7,
+ 0x1b80, 0x7b0129b5,
+ 0x1b80, 0x7b0129b7,
+ 0x1b80, 0x782829c5,
+ 0x1b80, 0x782829c7,
+ 0x1b80, 0x79b429d5,
+ 0x1b80, 0x79b429d7,
+ 0x1b80, 0x7a0029e5,
+ 0x1b80, 0x7a0029e7,
+ 0x1b80, 0x7b0029f5,
+ 0x1b80, 0x7b0029f7,
+ 0x1b80, 0x00012a05,
+ 0x1b80, 0x00012a07,
+ 0x1b80, 0x00072a15,
+ 0x1b80, 0x00072a17,
+ 0x1b80, 0x75002a25,
+ 0x1b80, 0x75002a27,
+ 0x1b80, 0x76022a35,
+ 0x1b80, 0x76022a37,
+ 0x1b80, 0x77152a45,
+ 0x1b80, 0x77152a47,
+ 0x1b80, 0x00062a55,
+ 0x1b80, 0x00062a57,
+ 0x1b80, 0x74002a65,
+ 0x1b80, 0x74002a67,
+ 0x1b80, 0x76002a75,
+ 0x1b80, 0x76002a77,
+ 0x1b80, 0x77002a85,
+ 0x1b80, 0x77002a87,
+ 0x1b80, 0x75102a95,
+ 0x1b80, 0x75102a97,
+ 0x1b80, 0x75002aa5,
+ 0x1b80, 0x75002aa7,
+ 0x1b80, 0xb3002ab5,
+ 0x1b80, 0xb3002ab7,
+ 0x1b80, 0x93002ac5,
+ 0x1b80, 0x93002ac7,
+ 0x1b80, 0x00072ad5,
+ 0x1b80, 0x00072ad7,
+ 0x1b80, 0x76002ae5,
+ 0x1b80, 0x76002ae7,
+ 0x1b80, 0x77002af5,
+ 0x1b80, 0x77002af7,
+ 0x1b80, 0x00012b05,
+ 0x1b80, 0x00012b07,
+ 0x1b80, 0x00072b15,
+ 0x1b80, 0x00072b17,
+ 0x1b80, 0x75002b25,
+ 0x1b80, 0x75002b27,
+ 0x1b80, 0x76022b35,
+ 0x1b80, 0x76022b37,
+ 0x1b80, 0x77252b45,
+ 0x1b80, 0x77252b47,
+ 0x1b80, 0x00062b55,
+ 0x1b80, 0x00062b57,
+ 0x1b80, 0x74002b65,
+ 0x1b80, 0x74002b67,
+ 0x1b80, 0x76002b75,
+ 0x1b80, 0x76002b77,
+ 0x1b80, 0x77012b85,
+ 0x1b80, 0x77012b87,
+ 0x1b80, 0x75102b95,
+ 0x1b80, 0x75102b97,
+ 0x1b80, 0x75002ba5,
+ 0x1b80, 0x75002ba7,
+ 0x1b80, 0xb3002bb5,
+ 0x1b80, 0xb3002bb7,
+ 0x1b80, 0x93002bc5,
+ 0x1b80, 0x93002bc7,
+ 0x1b80, 0x00072bd5,
+ 0x1b80, 0x00072bd7,
+ 0x1b80, 0x76002be5,
+ 0x1b80, 0x76002be7,
+ 0x1b80, 0x77002bf5,
+ 0x1b80, 0x77002bf7,
+ 0x1b80, 0x00012c05,
+ 0x1b80, 0x00012c07,
+ 0x1b80, 0x00042c15,
+ 0x1b80, 0x00042c17,
+ 0x1b80, 0x44802c25,
+ 0x1b80, 0x44802c27,
+ 0x1b80, 0x47302c35,
+ 0x1b80, 0x47302c37,
+ 0x1b80, 0x00062c45,
+ 0x1b80, 0x00062c47,
+ 0x1b80, 0x776c2c55,
+ 0x1b80, 0x776c2c57,
+ 0x1b80, 0x00012c65,
+ 0x1b80, 0x00012c67,
+ 0x1b80, 0x00052c75,
+ 0x1b80, 0x00052c77,
+ 0x1b80, 0x5c802c85,
+ 0x1b80, 0x5c802c87,
+ 0x1b80, 0x5f302c95,
+ 0x1b80, 0x5f302c97,
+ 0x1b80, 0x00062ca5,
+ 0x1b80, 0x00062ca7,
+ 0x1b80, 0x776d2cb5,
+ 0x1b80, 0x776d2cb7,
+ 0x1b80, 0x00012cc5,
+ 0x1b80, 0x00012cc7,
+ 0x1b80, 0xb9002cd5,
+ 0x1b80, 0xb9002cd7,
+ 0x1b80, 0x99002ce5,
+ 0x1b80, 0x99002ce7,
+ 0x1b80, 0x00062cf5,
+ 0x1b80, 0x00062cf7,
+ 0x1b80, 0x77002d05,
+ 0x1b80, 0x77002d07,
+ 0x1b80, 0x98052d15,
+ 0x1b80, 0x98052d17,
+ 0x1b80, 0x00042d25,
+ 0x1b80, 0x00042d27,
+ 0x1b80, 0x40082d35,
+ 0x1b80, 0x40082d37,
+ 0x1b80, 0x4a022d45,
+ 0x1b80, 0x4a022d47,
+ 0x1b80, 0x30192d55,
+ 0x1b80, 0x30192d57,
+ 0x1b80, 0x00012d65,
+ 0x1b80, 0x00012d67,
+ 0x1b80, 0x7b482d75,
+ 0x1b80, 0x7b482d77,
+ 0x1b80, 0x7a902d85,
+ 0x1b80, 0x7a902d87,
+ 0x1b80, 0x79002d95,
+ 0x1b80, 0x79002d97,
+ 0x1b80, 0x55032da5,
+ 0x1b80, 0x55032da7,
+ 0x1b80, 0x32e32db5,
+ 0x1b80, 0x32e32db7,
+ 0x1b80, 0x7b382dc5,
+ 0x1b80, 0x7b382dc7,
+ 0x1b80, 0x7a802dd5,
+ 0x1b80, 0x7a802dd7,
+ 0x1b80, 0x550b2de5,
+ 0x1b80, 0x550b2de7,
+ 0x1b80, 0x32e32df5,
+ 0x1b80, 0x32e32df7,
+ 0x1b80, 0x7b402e05,
+ 0x1b80, 0x7b402e07,
+ 0x1b80, 0x7a002e15,
+ 0x1b80, 0x7a002e17,
+ 0x1b80, 0x55132e25,
+ 0x1b80, 0x55132e27,
+ 0x1b80, 0x74012e35,
+ 0x1b80, 0x74012e37,
+ 0x1b80, 0x74002e45,
+ 0x1b80, 0x74002e47,
+ 0x1b80, 0x8e002e55,
+ 0x1b80, 0x8e002e57,
+ 0x1b80, 0x00012e65,
+ 0x1b80, 0x00012e67,
+ 0x1b80, 0x57022e75,
+ 0x1b80, 0x57022e77,
+ 0x1b80, 0x57002e85,
+ 0x1b80, 0x57002e87,
+ 0x1b80, 0x97002e95,
+ 0x1b80, 0x97002e97,
+ 0x1b80, 0x00012ea5,
+ 0x1b80, 0x00012ea7,
+ 0x1b80, 0x4f782eb5,
+ 0x1b80, 0x4f782eb7,
+ 0x1b80, 0x53882ec5,
+ 0x1b80, 0x53882ec7,
+ 0x1b80, 0xe2f72ed5,
+ 0x1b80, 0xe2f72ed7,
+ 0x1b80, 0x54802ee5,
+ 0x1b80, 0x54802ee7,
+ 0x1b80, 0x54002ef5,
+ 0x1b80, 0x54002ef7,
+ 0x1b80, 0x54812f05,
+ 0x1b80, 0x54812f07,
+ 0x1b80, 0x54002f15,
+ 0x1b80, 0x54002f17,
+ 0x1b80, 0x54822f25,
+ 0x1b80, 0x54822f27,
+ 0x1b80, 0x54002f35,
+ 0x1b80, 0x54002f37,
+ 0x1b80, 0xe3022f45,
+ 0x1b80, 0xe3022f47,
+ 0x1b80, 0xbf1d2f55,
+ 0x1b80, 0xbf1d2f57,
+ 0x1b80, 0x30192f65,
+ 0x1b80, 0x30192f67,
+ 0x1b80, 0xe2d72f75,
+ 0x1b80, 0xe2d72f77,
+ 0x1b80, 0xe2dc2f85,
+ 0x1b80, 0xe2dc2f87,
+ 0x1b80, 0xe2e02f95,
+ 0x1b80, 0xe2e02f97,
+ 0x1b80, 0xe2e72fa5,
+ 0x1b80, 0xe2e72fa7,
+ 0x1b80, 0xe3412fb5,
+ 0x1b80, 0xe3412fb7,
+ 0x1b80, 0x55132fc5,
+ 0x1b80, 0x55132fc7,
+ 0x1b80, 0xe2e32fd5,
+ 0x1b80, 0xe2e32fd7,
+ 0x1b80, 0x55152fe5,
+ 0x1b80, 0x55152fe7,
+ 0x1b80, 0xe2e72ff5,
+ 0x1b80, 0xe2e72ff7,
+ 0x1b80, 0xe3413005,
+ 0x1b80, 0xe3413007,
+ 0x1b80, 0x00013015,
+ 0x1b80, 0x00013017,
+ 0x1b80, 0x54bf3025,
+ 0x1b80, 0x54bf3027,
+ 0x1b80, 0x54c03035,
+ 0x1b80, 0x54c03037,
+ 0x1b80, 0x54a33045,
+ 0x1b80, 0x54a33047,
+ 0x1b80, 0x54c13055,
+ 0x1b80, 0x54c13057,
+ 0x1b80, 0x54a43065,
+ 0x1b80, 0x54a43067,
+ 0x1b80, 0x4c183075,
+ 0x1b80, 0x4c183077,
+ 0x1b80, 0xbf073085,
+ 0x1b80, 0xbf073087,
+ 0x1b80, 0x54c23095,
+ 0x1b80, 0x54c23097,
+ 0x1b80, 0x54a430a5,
+ 0x1b80, 0x54a430a7,
+ 0x1b80, 0xbf0430b5,
+ 0x1b80, 0xbf0430b7,
+ 0x1b80, 0x54c130c5,
+ 0x1b80, 0x54c130c7,
+ 0x1b80, 0x54a330d5,
+ 0x1b80, 0x54a330d7,
+ 0x1b80, 0xbf0130e5,
+ 0x1b80, 0xbf0130e7,
+ 0x1b80, 0xe34f30f5,
+ 0x1b80, 0xe34f30f7,
+ 0x1b80, 0x54df3105,
+ 0x1b80, 0x54df3107,
+ 0x1b80, 0x00013115,
+ 0x1b80, 0x00013117,
+ 0x1b80, 0x54bf3125,
+ 0x1b80, 0x54bf3127,
+ 0x1b80, 0x54e53135,
+ 0x1b80, 0x54e53137,
+ 0x1b80, 0x050a3145,
+ 0x1b80, 0x050a3147,
+ 0x1b80, 0x54df3155,
+ 0x1b80, 0x54df3157,
+ 0x1b80, 0x00013165,
+ 0x1b80, 0x00013167,
+ 0x1b80, 0x7f403175,
+ 0x1b80, 0x7f403177,
+ 0x1b80, 0x7e003185,
+ 0x1b80, 0x7e003187,
+ 0x1b80, 0x7d003195,
+ 0x1b80, 0x7d003197,
+ 0x1b80, 0x550131a5,
+ 0x1b80, 0x550131a7,
+ 0x1b80, 0x5c3131b5,
+ 0x1b80, 0x5c3131b7,
+ 0x1b80, 0xe2e331c5,
+ 0x1b80, 0xe2e331c7,
+ 0x1b80, 0xe2e731d5,
+ 0x1b80, 0xe2e731d7,
+ 0x1b80, 0x548031e5,
+ 0x1b80, 0x548031e7,
+ 0x1b80, 0x540031f5,
+ 0x1b80, 0x540031f7,
+ 0x1b80, 0x54813205,
+ 0x1b80, 0x54813207,
+ 0x1b80, 0x54003215,
+ 0x1b80, 0x54003217,
+ 0x1b80, 0x54823225,
+ 0x1b80, 0x54823227,
+ 0x1b80, 0x54003235,
+ 0x1b80, 0x54003237,
+ 0x1b80, 0xe3023245,
+ 0x1b80, 0xe3023247,
+ 0x1b80, 0xbfed3255,
+ 0x1b80, 0xbfed3257,
+ 0x1b80, 0x30193265,
+ 0x1b80, 0x30193267,
+ 0x1b80, 0x74023275,
+ 0x1b80, 0x74023277,
+ 0x1b80, 0x003f3285,
+ 0x1b80, 0x003f3287,
+ 0x1b80, 0x74003295,
+ 0x1b80, 0x74003297,
+ 0x1b80, 0x000232a5,
+ 0x1b80, 0x000232a7,
+ 0x1b80, 0x000132b5,
+ 0x1b80, 0x000132b7,
+ 0x1b80, 0x000632c5,
+ 0x1b80, 0x000632c7,
+ 0x1b80, 0x5a8032d5,
+ 0x1b80, 0x5a8032d7,
+ 0x1b80, 0x5a0032e5,
+ 0x1b80, 0x5a0032e7,
+ 0x1b80, 0x920032f5,
+ 0x1b80, 0x920032f7,
+ 0x1b80, 0x00013305,
+ 0x1b80, 0x00013307,
+ 0x1b80, 0x5b8f3315,
+ 0x1b80, 0x5b8f3317,
+ 0x1b80, 0x5b0f3325,
+ 0x1b80, 0x5b0f3327,
+ 0x1b80, 0x91003335,
+ 0x1b80, 0x91003337,
+ 0x1b80, 0x00013345,
+ 0x1b80, 0x00013347,
+ 0x1b80, 0x00063355,
+ 0x1b80, 0x00063357,
+ 0x1b80, 0x5d803365,
+ 0x1b80, 0x5d803367,
+ 0x1b80, 0x5e563375,
+ 0x1b80, 0x5e563377,
+ 0x1b80, 0x00043385,
+ 0x1b80, 0x00043387,
+ 0x1b80, 0x4d083395,
+ 0x1b80, 0x4d083397,
+ 0x1b80, 0x571033a5,
+ 0x1b80, 0x571033a7,
+ 0x1b80, 0x570033b5,
+ 0x1b80, 0x570033b7,
+ 0x1b80, 0x4d0033c5,
+ 0x1b80, 0x4d0033c7,
+ 0x1b80, 0x000633d5,
+ 0x1b80, 0x000633d7,
+ 0x1b80, 0x5d0033e5,
+ 0x1b80, 0x5d0033e7,
+ 0x1b80, 0x000433f5,
+ 0x1b80, 0x000433f7,
+ 0x1b80, 0x00013405,
+ 0x1b80, 0x00013407,
+ 0x1b80, 0x549f3415,
+ 0x1b80, 0x549f3417,
+ 0x1b80, 0x54ff3425,
+ 0x1b80, 0x54ff3427,
+ 0x1b80, 0x54003435,
+ 0x1b80, 0x54003437,
+ 0x1b80, 0x00013445,
+ 0x1b80, 0x00013447,
+ 0x1b80, 0x5c313455,
+ 0x1b80, 0x5c313457,
+ 0x1b80, 0x07143465,
+ 0x1b80, 0x07143467,
+ 0x1b80, 0x54003475,
+ 0x1b80, 0x54003477,
+ 0x1b80, 0x5c323485,
+ 0x1b80, 0x5c323487,
+ 0x1b80, 0x00013495,
+ 0x1b80, 0x00013497,
+ 0x1b80, 0x5c3234a5,
+ 0x1b80, 0x5c3234a7,
+ 0x1b80, 0x071434b5,
+ 0x1b80, 0x071434b7,
+ 0x1b80, 0x540034c5,
+ 0x1b80, 0x540034c7,
+ 0x1b80, 0x5c3134d5,
+ 0x1b80, 0x5c3134d7,
+ 0x1b80, 0x000134e5,
+ 0x1b80, 0x000134e7,
+ 0x1b80, 0x4c9834f5,
+ 0x1b80, 0x4c9834f7,
+ 0x1b80, 0x4c183505,
+ 0x1b80, 0x4c183507,
+ 0x1b80, 0x00013515,
+ 0x1b80, 0x00013517,
+ 0x1b80, 0x5c323525,
+ 0x1b80, 0x5c323527,
+ 0x1b80, 0x62043535,
+ 0x1b80, 0x62043537,
+ 0x1b80, 0x63033545,
+ 0x1b80, 0x63033547,
+ 0x1b80, 0x66073555,
+ 0x1b80, 0x66073557,
+ 0x1b80, 0x7b403565,
+ 0x1b80, 0x7b403567,
+ 0x1b80, 0x7a003575,
+ 0x1b80, 0x7a003577,
+ 0x1b80, 0x79003585,
+ 0x1b80, 0x79003587,
+ 0x1b80, 0x7f403595,
+ 0x1b80, 0x7f403597,
+ 0x1b80, 0x7e0035a5,
+ 0x1b80, 0x7e0035a7,
+ 0x1b80, 0x7d0035b5,
+ 0x1b80, 0x7d0035b7,
+ 0x1b80, 0x090135c5,
+ 0x1b80, 0x090135c7,
+ 0x1b80, 0x0c0135d5,
+ 0x1b80, 0x0c0135d7,
+ 0x1b80, 0x0ba635e5,
+ 0x1b80, 0x0ba635e7,
+ 0x1b80, 0x000135f5,
+ 0x1b80, 0x000135f7,
+ 0x1b80, 0x00000006,
+ 0x1b80, 0x00000002,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8822c_array_mp_cal_init, rtw_phy_cfg_bb);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
new file mode 100644
index 000000000000..06e207dd8e5f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8822C_TABLE_H__
+#define __RTW8822C_TABLE_H__
+
+extern const struct rtw_table rtw8822c_mac_tbl;
+extern const struct rtw_table rtw8822c_agc_tbl;
+extern const struct rtw_table rtw8822c_bb_tbl;
+extern const struct rtw_table rtw8822c_bb_pg_type0_tbl;
+extern const struct rtw_table rtw8822c_rf_a_tbl;
+extern const struct rtw_table rtw8822c_rf_b_tbl;
+extern const struct rtw_table rtw8822c_txpwr_lmt_type0_tbl;
+extern const struct rtw_table rtw8822c_array_mp_cal_init_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rx.c b/drivers/net/wireless/realtek/rtw88/rx.c
new file mode 100644
index 000000000000..4d837f0c6d5f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rx.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "rx.h"
+#include "ps.h"
+
+void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rtw_vif *rtwvif;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ if (!is_broadcast_ether_addr(hdr->addr1) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
+ rtwdev->stats.rx_unicast += skb->len;
+ rtwdev->stats.rx_cnt++;
+ if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ rtwvif->stats.rx_unicast += skb->len;
+ rtwvif->stats.rx_cnt++;
+ if (rtwvif->stats.rx_cnt > RTW_LPS_THRESHOLD)
+ rtw_leave_lps_irqsafe(rtwdev, rtwvif);
+ }
+ }
+}
+EXPORT_SYMBOL(rtw_rx_stats);
+
+struct rtw_rx_addr_match_data {
+ struct rtw_dev *rtwdev;
+ struct ieee80211_hdr *hdr;
+ struct rtw_rx_pkt_stat *pkt_stat;
+ u8 *bssid;
+};
+
+static void rtw_rx_addr_match_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_rx_addr_match_data *iter_data = data;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr = iter_data->hdr;
+ struct rtw_dev *rtwdev = iter_data->rtwdev;
+ struct rtw_sta_info *si;
+ struct rtw_rx_pkt_stat *pkt_stat = iter_data->pkt_stat;
+ u8 *bssid = iter_data->bssid;
+
+ if (ether_addr_equal(vif->bss_conf.bssid, bssid) &&
+ (ether_addr_equal(vif->addr, hdr->addr1) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ sta = ieee80211_find_sta_by_ifaddr(rtwdev->hw, hdr->addr2,
+ vif->addr);
+ else
+ return;
+
+ if (!sta)
+ return;
+
+ si = (struct rtw_sta_info *)sta->drv_priv;
+ ewma_rssi_add(&si->avg_rssi, pkt_stat->rssi);
+}
+
+static void rtw_rx_addr_match(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr)
+{
+ struct rtw_rx_addr_match_data data = {};
+
+ if (pkt_stat->crc_err || pkt_stat->icv_err || !pkt_stat->phy_status ||
+ ieee80211_is_ctl(hdr->frame_control))
+ return;
+
+ data.rtwdev = rtwdev;
+ data.hdr = hdr;
+ data.pkt_stat = pkt_stat;
+ data.bssid = get_hdr_bssid(hdr);
+
+ rtw_iterate_vifs_atomic(rtwdev, rtw_rx_addr_match_iter, &data);
+}
+
+void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u8 *phy_status)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+
+ memset(rx_status, 0, sizeof(*rx_status));
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+ if (pkt_stat->crc_err)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (pkt_stat->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
+ if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0)
+ rx_status->encoding = RX_ENC_VHT;
+ else if (pkt_stat->rate >= DESC_RATEMCS0)
+ rx_status->encoding = RX_ENC_HT;
+
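+ /* rate_idx is relative to the first rate of the matched group:
+ * per-NSS VHT MCS0, HT MCS0, or the band's lowest legacy rate
+ */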
+ if (pkt_stat->rate >= DESC_RATEVHT1SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT1SS_MCS9) {
+ rx_status->nss = 1;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT1SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEVHT2SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT2SS_MCS9) {
+ rx_status->nss = 2;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT2SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEVHT3SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT3SS_MCS9) {
+ rx_status->nss = 3;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT3SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEVHT4SS_MCS0 &&
+ pkt_stat->rate <= DESC_RATEVHT4SS_MCS9) {
+ rx_status->nss = 4;
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEVHT4SS_MCS0;
+ } else if (pkt_stat->rate >= DESC_RATEMCS0 &&
+ pkt_stat->rate <= DESC_RATEMCS15) {
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATEMCS0;
+ } else if (rx_status->band == NL80211_BAND_5GHZ &&
+ pkt_stat->rate >= DESC_RATE6M &&
+ pkt_stat->rate <= DESC_RATE54M) {
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATE6M;
+ } else if (rx_status->band == NL80211_BAND_2GHZ &&
+ pkt_stat->rate >= DESC_RATE1M &&
+ pkt_stat->rate <= DESC_RATE54M) {
+ rx_status->rate_idx = pkt_stat->rate - DESC_RATE1M;
+ } else {
+ rx_status->rate_idx = 0;
+ }
+
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+ rx_status->mactime = pkt_stat->tsf_low;
+
+ if (pkt_stat->bw == RTW_CHANNEL_WIDTH_80)
+ rx_status->bw = RATE_INFO_BW_80;
+ else if (pkt_stat->bw == RTW_CHANNEL_WIDTH_40)
+ rx_status->bw = RATE_INFO_BW_40;
+ else
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->signal = pkt_stat->signal_power;
+
+ rtw_rx_addr_match(rtwdev, pkt_stat, hdr);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
new file mode 100644
index 000000000000..383f3b2babc1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_RX_H_
+#define __RTW_RX_H_
+
+#define GET_RX_DESC_PHYST(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(26))
+#define GET_RX_DESC_ICV_ERR(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(15))
+#define GET_RX_DESC_CRC32(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(14))
+#define GET_RX_DESC_SWDEC(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), BIT(27))
+#define GET_RX_DESC_C2H(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x02), BIT(28))
+#define GET_RX_DESC_PKT_LEN(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(13, 0))
+#define GET_RX_DESC_DRV_INFO_SIZE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(19, 16))
+#define GET_RX_DESC_SHIFT(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x00), GENMASK(25, 24))
+#define GET_RX_DESC_RX_RATE(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x03), GENMASK(6, 0))
+#define GET_RX_DESC_MACID(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x01), GENMASK(6, 0))
+#define GET_RX_DESC_PPDU_CNT(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x02), GENMASK(30, 29))
+#define GET_RX_DESC_TSFL(rxdesc) \
+ le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
+
+void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct sk_buff *skb);
+void rtw_rx_fill_rx_status(struct rtw_dev *rtwdev,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rx_status,
+ u8 *phy_status);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/sec.c b/drivers/net/wireless/realtek/rtw88/sec.c
new file mode 100644
index 000000000000..c594fc02804d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/sec.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "sec.h"
+#include "reg.h"
+
+int rtw_sec_get_free_cam(struct rtw_sec_desc *sec)
+{
+ /* if default key search is enabled, the first 4 cam entries
+ * map directly to the group key by its key->key_idx, so the
+ * driver should install pairwise keys in the cam entries after
+ * the first 4
+ */
+ if (sec->default_key_search)
+ return find_next_zero_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM,
+ RTW_SEC_DEFAULT_KEY_NUM);
+
+ return find_first_zero_bit(sec->cam_map, RTW_MAX_SEC_CAM_NUM);
+}
+
+void rtw_sec_write_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ u8 hw_key_type, u8 hw_key_idx)
+{
+ struct rtw_cam_entry *cam = &sec->cam_table[hw_key_idx];
+ u32 write_cmd;
+ u32 command;
+ u32 content;
+ u32 addr;
+ int i, j;
+
+ set_bit(hw_key_idx, sec->cam_map);
+ cam->valid = true;
+ cam->group = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+ cam->hw_key_type = hw_key_type;
+ cam->key = key;
+ if (sta)
+ ether_addr_copy(cam->addr, sta->addr);
+ else
+ eth_broadcast_addr(cam->addr);
+
+ write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
+ addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
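+ /* one cam entry spans 8 words (RTW_SEC_CAM_ENTRY_SHIFT = 3); word 0
+ * carries the key index, key type, group/valid bits and addr[0:1],
+ * word 1 carries addr[2:5], and words 2-5 carry the 16-byte key.
+ * Writing from word 5 down to 0 programs the control word last.
+ */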
+ for (i = 5; i >= 0; i--) {
+ switch (i) {
+ case 0:
+ content = ((key->keyidx & 0x3)) |
+ ((hw_key_type & 0x7) << 2) |
+ (cam->group << 6) |
+ (cam->valid << 15) |
+ (cam->addr[0] << 16) |
+ (cam->addr[1] << 24);
+ break;
+ case 1:
+ content = (cam->addr[2]) |
+ (cam->addr[3] << 8) |
+ (cam->addr[4] << 16) |
+ (cam->addr[5] << 24);
+ break;
+ default:
+ j = (i - 2) << 2;
+ content = (key->key[j]) |
+ (key->key[j + 1] << 8) |
+ (key->key[j + 2] << 16) |
+ (key->key[j + 3] << 24);
+ break;
+ }
+
+ command = write_cmd | (addr + i);
+ rtw_write32(rtwdev, RTW_SEC_WRITE_REG, content);
+ rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
+ }
+}
+
+void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ u8 hw_key_idx)
+{
+ struct rtw_cam_entry *cam = &sec->cam_table[hw_key_idx];
+ u32 write_cmd;
+ u32 command;
+ u32 addr;
+
+ clear_bit(hw_key_idx, sec->cam_map);
+ cam->valid = false;
+ cam->key = NULL;
+ eth_zero_addr(cam->addr);
+
+ write_cmd = RTW_SEC_CMD_WRITE_ENABLE | RTW_SEC_CMD_POLLING;
+ addr = hw_key_idx << RTW_SEC_CAM_ENTRY_SHIFT;
+ command = write_cmd | addr;
+ rtw_write32(rtwdev, RTW_SEC_WRITE_REG, 0);
+ rtw_write32(rtwdev, RTW_SEC_CMD_REG, command);
+}
+
+void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev)
+{
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+ u16 ctrl_reg;
+ u16 sec_config;
+
+ /* use default key search by default for now */
+ sec->default_key_search = true;
+
+ ctrl_reg = rtw_read16(rtwdev, REG_CR);
+ ctrl_reg |= RTW_SEC_ENGINE_EN;
+ rtw_write16(rtwdev, REG_CR, ctrl_reg);
+
+ sec_config = rtw_read16(rtwdev, RTW_SEC_CONFIG);
+
+ sec_config |= RTW_SEC_TX_DEC_EN | RTW_SEC_RX_DEC_EN;
+ if (sec->default_key_search)
+ sec_config |= RTW_SEC_TX_UNI_USE_DK | RTW_SEC_RX_UNI_USE_DK |
+ RTW_SEC_TX_BC_USE_DK | RTW_SEC_RX_BC_USE_DK;
+
+ rtw_write16(rtwdev, RTW_SEC_CONFIG, sec_config);
+}
diff --git a/drivers/net/wireless/realtek/rtw88/sec.h b/drivers/net/wireless/realtek/rtw88/sec.h
new file mode 100644
index 000000000000..8c50a895c797
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/sec.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_SEC_H_
+#define __RTW_SEC_H_
+
+#define RTW_SEC_CMD_REG 0x670
+#define RTW_SEC_WRITE_REG 0x674
+#define RTW_SEC_READ_REG 0x678
+#define RTW_SEC_CONFIG 0x680
+
+#define RTW_SEC_CAM_ENTRY_SHIFT 3
+#define RTW_SEC_DEFAULT_KEY_NUM 4
+#define RTW_SEC_CMD_WRITE_ENABLE BIT(16)
+#define RTW_SEC_CMD_CLEAR BIT(30)
+#define RTW_SEC_CMD_POLLING BIT(31)
+
+#define RTW_SEC_TX_UNI_USE_DK BIT(0)
+#define RTW_SEC_RX_UNI_USE_DK BIT(1)
+#define RTW_SEC_TX_DEC_EN BIT(2)
+#define RTW_SEC_RX_DEC_EN BIT(3)
+#define RTW_SEC_TX_BC_USE_DK BIT(6)
+#define RTW_SEC_RX_BC_USE_DK BIT(7)
+
+#define RTW_SEC_ENGINE_EN BIT(9)
+
+int rtw_sec_get_free_cam(struct rtw_sec_desc *sec);
+void rtw_sec_write_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ u8 hw_key_type, u8 hw_key_idx);
+void rtw_sec_clear_cam(struct rtw_dev *rtwdev,
+ struct rtw_sec_desc *sec,
+ u8 hw_key_idx);
+void rtw_sec_enable_sec_engine(struct rtw_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
new file mode 100644
index 000000000000..e32faf8bead9
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "tx.h"
+#include "fw.h"
+#include "ps.h"
+
+static
+void rtw_tx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rtw_vif *rtwvif;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ if (!is_broadcast_ether_addr(hdr->addr1) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
+ rtwdev->stats.tx_unicast += skb->len;
+ rtwdev->stats.tx_cnt++;
+ if (vif) {
+ rtwvif = (struct rtw_vif *)vif->drv_priv;
+ rtwvif->stats.tx_unicast += skb->len;
+ rtwvif->stats.tx_cnt++;
+ if (rtwvif->stats.tx_cnt > RTW_LPS_THRESHOLD)
+ rtw_leave_lps_irqsafe(rtwdev, rtwvif);
+ }
+ }
+}
+
+void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb)
+{
+ __le32 *txdesc = (__le32 *)skb->data;
+
+ SET_TX_DESC_TXPKTSIZE(txdesc, pkt_info->tx_pkt_size);
+ SET_TX_DESC_OFFSET(txdesc, pkt_info->offset);
+ SET_TX_DESC_PKT_OFFSET(txdesc, pkt_info->pkt_offset);
+ SET_TX_DESC_QSEL(txdesc, pkt_info->qsel);
+ SET_TX_DESC_BMC(txdesc, pkt_info->bmc);
+ SET_TX_DESC_RATE_ID(txdesc, pkt_info->rate_id);
+ SET_TX_DESC_DATARATE(txdesc, pkt_info->rate);
+ SET_TX_DESC_DISDATAFB(txdesc, pkt_info->dis_rate_fallback);
+ SET_TX_DESC_USE_RATE(txdesc, pkt_info->use_rate);
+ SET_TX_DESC_SEC_TYPE(txdesc, pkt_info->sec_type);
+ SET_TX_DESC_DATA_BW(txdesc, pkt_info->bw);
+ SET_TX_DESC_SW_SEQ(txdesc, pkt_info->seq);
+ SET_TX_DESC_MAX_AGG_NUM(txdesc, pkt_info->ampdu_factor);
+ SET_TX_DESC_AMPDU_DENSITY(txdesc, pkt_info->ampdu_density);
+ SET_TX_DESC_DATA_STBC(txdesc, pkt_info->stbc);
+ SET_TX_DESC_DATA_LDPC(txdesc, pkt_info->ldpc);
+ SET_TX_DESC_AGG_EN(txdesc, pkt_info->ampdu_en);
+ SET_TX_DESC_LS(txdesc, pkt_info->ls);
+ SET_TX_DESC_DATA_SHORT(txdesc, pkt_info->short_gi);
+ SET_TX_DESC_SPE_RPT(txdesc, pkt_info->report);
+ SET_TX_DESC_SW_DEFINE(txdesc, pkt_info->sn);
+}
+EXPORT_SYMBOL(rtw_tx_fill_tx_desc);
+
+static u8 get_tx_ampdu_factor(struct ieee80211_sta *sta)
+{
+ u8 exp = sta->ht_cap.ampdu_factor;
+
+ /* the smallest ampdu factor is 8K, and the value in the tx desc is
+ * the max aggregation num, meaning that val * 2 packets can be
+ * aggregated in an AMPDU, so use 8 / 2 = 4 as the base here
+ */
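+ /* e.g. ampdu_factor exp = 3 (64K max A-MPDU) gives (BIT(2) << 3) - 1 = 31 */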
+ return (BIT(2) << exp) - 1;
+}
+
+static u8 get_tx_ampdu_density(struct ieee80211_sta *sta)
+{
+ return sta->ht_cap.ampdu_density;
+}
+
+static u8 get_highest_ht_tx_rate(struct rtw_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ u8 rate;
+
+ if (rtwdev->hal.rf_type == RF_2T2R && sta->ht_cap.mcs.rx_mask[1] != 0)
+ rate = DESC_RATEMCS15;
+ else
+ rate = DESC_RATEMCS7;
+
+ return rate;
+}
+
+static u8 get_highest_vht_tx_rate(struct rtw_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 rate;
+ u16 tx_mcs_map;
+
+ tx_mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.tx_mcs_map);
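+ /* bits [1:0] of the VHT MCS map describe 1SS support, bits [3:2] 2SS */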
+ if (efuse->hw_cap.nss == 1) {
+ switch (tx_mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ rate = DESC_RATEVHT1SS_MCS7;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ rate = DESC_RATEVHT1SS_MCS8;
+ break;
+ default:
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ rate = DESC_RATEVHT1SS_MCS9;
+ break;
+ }
+ } else if (efuse->hw_cap.nss >= 2) {
+ switch ((tx_mcs_map & 0xc) >> 2) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ rate = DESC_RATEVHT2SS_MCS7;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ rate = DESC_RATEVHT2SS_MCS8;
+ break;
+ default:
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ rate = DESC_RATEVHT2SS_MCS9;
+ break;
+ }
+ } else {
+ rate = DESC_RATEVHT1SS_MCS9;
+ }
+
+ return rate;
+}
+
+static void rtw_tx_report_enable(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info)
+{
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+
+ /* [11:8], reserved, filled with zero
+ * [7:2], tx report sequence number
+ * [1:0], firmware use, filled with zero
+ */
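+ /* e.g. a raw sn of 5 becomes (5 << 2) & 0xfc = 0x14, occupying bits [7:2] */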
+ pkt_info->sn = (atomic_inc_return(&tx_report->sn) << 2) & 0xfc;
+ pkt_info->report = true;
+}
+
+void rtw_tx_report_purge_timer(struct timer_list *t)
+{
+ struct rtw_dev *rtwdev = from_timer(rtwdev, t, tx_report.purge_timer);
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+ unsigned long flags;
+
+ if (skb_queue_len(&tx_report->queue) == 0)
+ return;
+
+ WARN(1, "purge skb(s) not reported by firmware\n");
+
+ spin_lock_irqsave(&tx_report->q_lock, flags);
+ skb_queue_purge(&tx_report->queue);
+ spin_unlock_irqrestore(&tx_report->q_lock, flags);
+}
+
+void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn)
+{
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+ unsigned long flags;
+ u8 *drv_data;
+
+ /* pass sn to tx report handler through driver data */
+ drv_data = (u8 *)IEEE80211_SKB_CB(skb)->status.status_driver_data;
+ *drv_data = sn;
+
+ spin_lock_irqsave(&tx_report->q_lock, flags);
+ __skb_queue_tail(&tx_report->queue, skb);
+ spin_unlock_irqrestore(&tx_report->q_lock, flags);
+
+ mod_timer(&tx_report->purge_timer, jiffies + RTW_TX_PROBE_TIMEOUT);
+}
+EXPORT_SYMBOL(rtw_tx_report_enqueue);
+
+static void rtw_tx_report_tx_status(struct rtw_dev *rtwdev,
+ struct sk_buff *skb, bool acked)
+{
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+ if (acked)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(rtwdev->hw, skb);
+}
+
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
+{
+ struct rtw_tx_report *tx_report = &rtwdev->tx_report;
+ struct rtw_c2h_cmd *c2h;
+ struct sk_buff *cur, *tmp;
+ unsigned long flags;
+ u8 sn, st;
+ u8 *n;
+
+ c2h = get_c2h_from_skb(skb);
+
+ sn = GET_CCX_REPORT_SEQNUM(c2h->payload);
+ st = GET_CCX_REPORT_STATUS(c2h->payload);
+
+ spin_lock_irqsave(&tx_report->q_lock, flags);
+ skb_queue_walk_safe(&tx_report->queue, cur, tmp) {
+ n = (u8 *)IEEE80211_SKB_CB(cur)->status.status_driver_data;
+ if (*n == sn) {
+ __skb_unlink(cur, &tx_report->queue);
+ rtw_tx_report_tx_status(rtwdev, cur, st == 0);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&tx_report->q_lock, flags);
+}
+
+static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ pkt_info->use_rate = true;
+ pkt_info->rate_id = 6;
+ pkt_info->dis_rate_fallback = true;
+}
+
+static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct rtw_sta_info *si;
+ u16 seq;
+ u8 ampdu_factor = 0;
+ u8 ampdu_density = 0;
+ bool ampdu_en = false;
+ u8 rate = DESC_RATE6M;
+ u8 rate_id = 6;
+ u8 bw = RTW_CHANNEL_WIDTH_20;
+ bool stbc = false;
+ bool ldpc = false;
+
+ seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ /* for broadcast/multicast, use default values */
+ if (!sta)
+ goto out;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ampdu_en = true;
+ ampdu_factor = get_tx_ampdu_factor(sta);
+ ampdu_density = get_tx_ampdu_density(sta);
+ }
+
+ if (sta->vht_cap.vht_supported)
+ rate = get_highest_vht_tx_rate(rtwdev, sta);
+ else if (sta->ht_cap.ht_supported)
+ rate = get_highest_ht_tx_rate(rtwdev, sta);
+ else if (sta->supp_rates[0] <= 0xf)
+ rate = DESC_RATE11M;
+ else
+ rate = DESC_RATE54M;
+
+ si = (struct rtw_sta_info *)sta->drv_priv;
+
+ bw = si->bw_mode;
+ rate_id = si->rate_id;
+ stbc = si->stbc_en;
+ ldpc = si->ldpc_en;
+
+out:
+ pkt_info->seq = seq;
+ pkt_info->ampdu_factor = ampdu_factor;
+ pkt_info->ampdu_density = ampdu_density;
+ pkt_info->ampdu_en = ampdu_en;
+ pkt_info->rate = rate;
+ pkt_info->rate_id = rate_id;
+ pkt_info->bw = bw;
+ pkt_info->stbc = stbc;
+ pkt_info->ldpc = ldpc;
+}
+
+void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct rtw_sta_info *si;
+ struct ieee80211_vif *vif = NULL;
+ __le16 fc = hdr->frame_control;
+ u8 sec_type = 0;
+ bool bmc;
+
+ if (control->sta) {
+ si = (struct rtw_sta_info *)control->sta->drv_priv;
+ vif = si->vif;
+ }
+
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
+ rtw_tx_mgmt_pkt_info_update(rtwdev, pkt_info, control, skb);
+ else if (ieee80211_is_data(fc))
+ rtw_tx_data_pkt_info_update(rtwdev, pkt_info, control, skb);
+
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *key = info->control.hw_key;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ sec_type = 0x01;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ sec_type = 0x03;
+ break;
+ default:
+ break;
+ }
+ }
+
+ bmc = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
+ rtw_tx_report_enable(rtwdev, pkt_info);
+
+ pkt_info->bmc = bmc;
+ pkt_info->sec_type = sec_type;
+ pkt_info->tx_pkt_size = skb->len;
+ pkt_info->offset = chip->tx_pkt_desc_sz;
+ pkt_info->qsel = skb->priority;
+ pkt_info->ls = true;
+
+ /* maybe merge with tx status? */
+ rtw_tx_stats(rtwdev, vif, skb);
+}
+
+void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ bool bmc;
+
+ bmc = is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1);
+ pkt_info->use_rate = true;
+ pkt_info->rate_id = 6;
+ pkt_info->dis_rate_fallback = true;
+ pkt_info->bmc = bmc;
+ pkt_info->tx_pkt_size = skb->len;
+ pkt_info->offset = chip->tx_pkt_desc_sz;
+ pkt_info->qsel = skb->priority;
+ pkt_info->ls = true;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
new file mode 100644
index 000000000000..8338dbf55576
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_TX_H_
+#define __RTW_TX_H_
+
+#define RTK_TX_MAX_AGG_NUM_MASK 0x1f
+
+#define RTW_TX_PROBE_TIMEOUT msecs_to_jiffies(500)
+
+#define SET_TX_DESC_TXPKTSIZE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, GENMASK(15, 0))
+#define SET_TX_DESC_OFFSET(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, GENMASK(23, 16))
+#define SET_TX_DESC_PKT_OFFSET(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(28, 24))
+#define SET_TX_DESC_QSEL(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(12, 8))
+#define SET_TX_DESC_BMC(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(24))
+#define SET_TX_DESC_RATE_ID(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(20, 16))
+#define SET_TX_DESC_DATARATE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x04, value, GENMASK(6, 0))
+#define SET_TX_DESC_DISDATAFB(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(10))
+#define SET_TX_DESC_USE_RATE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(8))
+#define SET_TX_DESC_SEC_TYPE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x01, value, GENMASK(23, 22))
+#define SET_TX_DESC_DATA_BW(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(6, 5))
+#define SET_TX_DESC_SW_SEQ(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x09, value, GENMASK(23, 12))
+#define SET_TX_DESC_MAX_AGG_NUM(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(21, 17))
+#define SET_TX_DESC_AMPDU_DENSITY(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, GENMASK(22, 20))
+#define SET_TX_DESC_DATA_STBC(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, GENMASK(9, 8))
+#define SET_TX_DESC_DATA_LDPC(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(7))
+#define SET_TX_DESC_AGG_EN(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(12))
+#define SET_TX_DESC_LS(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x00, value, BIT(26))
+#define SET_TX_DESC_DATA_SHORT(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x05, value, BIT(4))
+#define SET_TX_DESC_SPE_RPT(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(19))
+#define SET_TX_DESC_SW_DEFINE(txdesc, value) \
+ le32p_replace_bits((__le32 *)(txdesc) + 0x06, value, GENMASK(11, 0))
+
+enum rtw_tx_desc_queue_select {
+ TX_DESC_QSEL_TID0 = 0,
+ TX_DESC_QSEL_TID1 = 1,
+ TX_DESC_QSEL_TID2 = 2,
+ TX_DESC_QSEL_TID3 = 3,
+ TX_DESC_QSEL_TID4 = 4,
+ TX_DESC_QSEL_TID5 = 5,
+ TX_DESC_QSEL_TID6 = 6,
+ TX_DESC_QSEL_TID7 = 7,
+ TX_DESC_QSEL_TID8 = 8,
+ TX_DESC_QSEL_TID9 = 9,
+ TX_DESC_QSEL_TID10 = 10,
+ TX_DESC_QSEL_TID11 = 11,
+ TX_DESC_QSEL_TID12 = 12,
+ TX_DESC_QSEL_TID13 = 13,
+ TX_DESC_QSEL_TID14 = 14,
+ TX_DESC_QSEL_TID15 = 15,
+ TX_DESC_QSEL_BEACON = 16,
+ TX_DESC_QSEL_HIGH = 17,
+ TX_DESC_QSEL_MGMT = 18,
+ TX_DESC_QSEL_H2C = 19,
+};
+
+void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb);
+void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb);
+void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn);
+void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb);
+void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/util.c b/drivers/net/wireless/realtek/rtw88/util.c
new file mode 100644
index 000000000000..212070c2baa8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/util.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "util.h"
+#include "reg.h"
+
+bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
+{
+ u32 cnt;
+
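+ /* poll the masked field up to 1000 times, 10 us apart (~10 ms total) */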
+ for (cnt = 0; cnt < 1000; cnt++) {
+ if (rtw_read32_mask(rtwdev, addr, mask) == target)
+ return true;
+
+ udelay(10);
+ }
+
+ return false;
+}
+
+bool ltecoex_read_reg(struct rtw_dev *rtwdev, u16 offset, u32 *val)
+{
+ if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
+ return false;
+
+ rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0x800F0000 | offset);
+ *val = rtw_read32(rtwdev, LTECOEX_READ_DATA);
+
+ return true;
+}
+
+bool ltecoex_reg_write(struct rtw_dev *rtwdev, u16 offset, u32 value)
+{
+ if (!check_hw_ready(rtwdev, LTECOEX_ACCESS_CTRL, LTECOEX_READY, 1))
+ return false;
+
+ rtw_write32(rtwdev, LTECOEX_WRITE_DATA, value);
+ rtw_write32(rtwdev, LTECOEX_ACCESS_CTRL, 0xC00F0000 | offset);
+
+ return true;
+}
+
+void rtw_restore_reg(struct rtw_dev *rtwdev,
+ struct rtw_backup_info *bckp, u32 num)
+{
+ u8 len;
+ u32 reg;
+ u32 val;
+ int i;
+
+ for (i = 0; i < num; i++, bckp++) {
+ len = bckp->len;
+ reg = bckp->reg;
+ val = bckp->val;
+
+ switch (len) {
+ case 1:
+ rtw_write8(rtwdev, reg, (u8)val);
+ break;
+ case 2:
+ rtw_write16(rtwdev, reg, (u16)val);
+ break;
+ case 4:
+ rtw_write32(rtwdev, reg, (u32)val);
+ break;
+ default:
+ break;
+ }
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
new file mode 100644
index 000000000000..7bd2843b0bce
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW_UTIL_H__
+#define __RTW_UTIL_H__
+
+struct rtw_dev;
+
+#define rtw_iterate_vifs(rtwdev, iterator, data) \
+ ieee80211_iterate_active_interfaces(rtwdev->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+#define rtw_iterate_vifs_atomic(rtwdev, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic(rtwdev->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+#define rtw_iterate_stas_atomic(rtwdev, iterator, data) \
+ ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+ u8 *bssid;
+
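+ /* the BSSID position depends on ToDS/FromDS: addr1 towards the AP,
+ * addr2 from the AP, addr3 otherwise
+ */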
+ if (ieee80211_has_tods(fc))
+ bssid = hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ bssid = hdr->addr2;
+ else
+ bssid = hdr->addr3;
+
+ return bssid;
+}
+
+#endif
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 51e4e92d95a0..e07a1152cec1 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1707,7 +1707,7 @@ static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
int len, ret, max_pmkids;
max_pmkids = priv->wdev.wiphy->max_num_pmkids;
- len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
+ len = struct_size(pmkids, bssid_info, max_pmkids);
pmkids = kzalloc(len, GFP_KERNEL);
if (!pmkids)
@@ -1740,7 +1740,7 @@ static int set_device_pmkids(struct usbnet *usbdev,
int ret, len, num_pmkids;
num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
- len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
+ len = struct_size(pmkids, bssid_info, num_pmkids);
pmkids->length = cpu_to_le32(len);
debug_print_pmkids(usbdev, pmkids, __func__);
@@ -1761,7 +1761,7 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
struct cfg80211_pmksa *pmksa,
int max_pmkids)
{
- int i, newlen, err;
+ int i, err;
unsigned int count;
count = le32_to_cpu(pmkids->bssid_info_count);
@@ -1786,9 +1786,7 @@ static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
count--;
- newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
-
- pmkids->length = cpu_to_le32(newlen);
+ pmkids->length = cpu_to_le32(struct_size(pmkids, bssid_info, count));
pmkids->bssid_info_count = cpu_to_le32(count);
return pmkids;
@@ -1831,7 +1829,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
}
/* add new pmkid */
- newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
+ newlen = struct_size(pmkids, bssid_info, count + 1);
new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
if (!new_pmkids) {
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 1dbaab2a96b7..f84250bdb8cf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -31,6 +31,13 @@ static struct ta_metadata metadata_flash_content[] = {
};
+static struct ta_metadata metadata[] = {{"pmemdata_dummy", 0x00000000},
+ {"rsi/rs9116_wlan.rps", 0x00000000},
+ {"rsi/rs9116_wlan_bt_classic.rps", 0x00000000},
+ {"rsi/pmemdata_dummy", 0x00000000},
+ {"rsi/rs9116_wlan_bt_classic.rps", 0x00000000}
+};
+
int rsi_send_pkt_to_bus(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
@@ -829,21 +836,18 @@ static int auto_fw_upgrade(struct rsi_hw *adapter, u8 *flash_content,
return 0;
}
-static int rsi_load_firmware(struct rsi_hw *adapter)
+static int rsi_hal_prepare_fwload(struct rsi_hw *adapter)
{
- struct rsi_common *common = adapter->priv;
struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
- const struct firmware *fw_entry = NULL;
- u32 regout_val = 0, content_size;
- u16 tmp_regout_val = 0;
- struct ta_metadata *metadata_p;
+ u32 regout_val = 0;
int status;
bl_start_cmd_timer(adapter, BL_CMD_TIMEOUT);
while (!adapter->blcmd_timer_expired) {
status = hif_ops->master_reg_read(adapter, SWBL_REGOUT,
- &regout_val, 2);
+ &regout_val,
+ RSI_COMMON_REG_SIZE);
if (status < 0) {
rsi_dbg(ERR_ZONE,
"%s: REGOUT read failed\n", __func__);
@@ -865,13 +869,26 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
(regout_val & 0xff));
status = hif_ops->master_reg_write(adapter, SWBL_REGOUT,
- (REGOUT_INVALID | REGOUT_INVALID << 8),
- 2);
- if (status < 0) {
+ (REGOUT_INVALID |
+ REGOUT_INVALID << 8),
+ RSI_COMMON_REG_SIZE);
+ if (status < 0)
rsi_dbg(ERR_ZONE, "%s: REGOUT writing failed..\n", __func__);
- return status;
- }
- mdelay(1);
+ else
+ rsi_dbg(INFO_ZONE,
+ "===> Device is ready to load firmware <===\n");
+
+ return status;
+}
+
+static int rsi_load_9113_firmware(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ const struct firmware *fw_entry = NULL;
+ u32 content_size;
+ u16 tmp_regout_val = 0;
+ struct ta_metadata *metadata_p;
+ int status;
status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
"AUTO_READ_CMD");
@@ -902,13 +919,15 @@ static int rsi_load_firmware(struct rsi_hw *adapter)
/* Get the firmware version */
common->lmac_ver.ver.info.fw_ver[0] =
- fw_entry->data[LMAC_VER_OFFSET] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET_9113] & 0xFF;
common->lmac_ver.ver.info.fw_ver[1] =
- fw_entry->data[LMAC_VER_OFFSET + 1] & 0xFF;
- common->lmac_ver.major = fw_entry->data[LMAC_VER_OFFSET + 2] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 1] & 0xFF;
+ common->lmac_ver.major =
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 2] & 0xFF;
common->lmac_ver.release_num =
- fw_entry->data[LMAC_VER_OFFSET + 3] & 0xFF;
- common->lmac_ver.minor = fw_entry->data[LMAC_VER_OFFSET + 4] & 0xFF;
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 3] & 0xFF;
+ common->lmac_ver.minor =
+ fw_entry->data[LMAC_VER_OFFSET_9113 + 4] & 0xFF;
common->lmac_ver.patch_num = 0;
rsi_print_version(common);
@@ -977,19 +996,161 @@ fail:
return status;
}
+static int rsi_load_9116_firmware(struct rsi_hw *adapter)
+{
+ struct rsi_common *common = adapter->priv;
+ struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops;
+ const struct firmware *fw_entry;
+ struct ta_metadata *metadata_p;
+ u8 *ta_firmware, *fw_p;
+ struct bootload_ds bootload_ds;
+ u32 instructions_sz, base_address;
+ u16 block_size = adapter->block_size;
+ u32 dest, len;
+ int status, cnt;
+
+ rsi_dbg(INIT_ZONE, "***** Load 9116 TA Instructions *****\n");
+
+ if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) {
+ status = bl_cmd(adapter, POLLING_MODE, CMD_PASS,
+ "POLLING_MODE");
+ if (status < 0)
+ return status;
+ }
+
+ status = hif_ops->master_reg_write(adapter, MEM_ACCESS_CTRL_FROM_HOST,
+ RAM_384K_ACCESS_FROM_TA,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: Unable to access full RAM memory\n",
+ __func__);
+ return status;
+ }
+
+ metadata_p = &metadata[adapter->priv->coex_mode];
+ rsi_dbg(INIT_ZONE, "%s: loading file %s\n", __func__, metadata_p->name);
+ status = request_firmware(&fw_entry, metadata_p->name, adapter->device);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "%s: Failed to open file %s\n",
+ __func__, metadata_p->name);
+ return status;
+ }
+
+ ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
+ if (!ta_firmware)
+ goto fail_release_fw;
+ fw_p = ta_firmware;
+ instructions_sz = fw_entry->size;
+ rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz);
+
+ common->lmac_ver.major = ta_firmware[LMAC_VER_OFFSET_9116];
+ common->lmac_ver.minor = ta_firmware[LMAC_VER_OFFSET_9116 + 1];
+ common->lmac_ver.release_num = ta_firmware[LMAC_VER_OFFSET_9116 + 2];
+ common->lmac_ver.patch_num = ta_firmware[LMAC_VER_OFFSET_9116 + 3];
+ common->lmac_ver.ver.info.fw_ver[0] =
+ ta_firmware[LMAC_VER_OFFSET_9116 + 4];
+
+ if (instructions_sz % FW_ALIGN_SIZE)
+ instructions_sz +=
+ (FW_ALIGN_SIZE - (instructions_sz % FW_ALIGN_SIZE));
+ rsi_dbg(INFO_ZONE, "instructions_sz : %d\n", instructions_sz);
+
+ if (*(u16 *)fw_p == RSI_9116_FW_MAGIC_WORD) {
+ memcpy(&bootload_ds, fw_p, sizeof(struct bootload_ds));
+ fw_p += le16_to_cpu(bootload_ds.offset);
+ rsi_dbg(INFO_ZONE, "FW start = %x\n", *(u32 *)fw_p);
+
+ cnt = 0;
+ do {
+ rsi_dbg(ERR_ZONE, "%s: Loading chunk %d\n",
+ __func__, cnt);
+
+ dest = le32_to_cpu(bootload_ds.bl_entry[cnt].dst_addr);
+ len = le32_to_cpu(bootload_ds.bl_entry[cnt].control) &
+ RSI_BL_CTRL_LEN_MASK;
+ rsi_dbg(INFO_ZONE, "length %d destination %x\n",
+ len, dest);
+
+ status = hif_ops->load_data_master_write(adapter, dest,
+ len,
+ block_size,
+ fw_p);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to load chunk %d\n", cnt);
+ break;
+ }
+ fw_p += len;
+ if (le32_to_cpu(bootload_ds.bl_entry[cnt].control) &
+ RSI_BL_CTRL_LAST_ENTRY)
+ break;
+ cnt++;
+ } while (1);
+ } else {
+ base_address = metadata_p->address;
+ status = hif_ops->load_data_master_write(adapter,
+ base_address,
+ instructions_sz,
+ block_size,
+ ta_firmware);
+ }
+ if (status) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unable to load %s blk\n",
+ __func__, metadata_p->name);
+ goto fail_free_fw;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Successfully loaded %s instructions\n",
+ __func__, metadata_p->name);
+
+ if (adapter->rsi_host_intf == RSI_HOST_INTF_SDIO) {
+ if (hif_ops->ta_reset(adapter))
+ rsi_dbg(ERR_ZONE, "Unable to put ta in reset\n");
+ } else {
+ if (bl_cmd(adapter, JUMP_TO_ZERO_PC,
+ CMD_PASS, "JUMP_TO_ZERO") < 0)
+ rsi_dbg(INFO_ZONE, "Jump to zero command failed\n");
+ else
+ rsi_dbg(INFO_ZONE, "Jump to zero command successful\n");
+ }
+
+fail_free_fw:
+ kfree(ta_firmware);
+fail_release_fw:
+ release_firmware(fw_entry);
+
+ return status;
+}
+
int rsi_hal_device_init(struct rsi_hw *adapter)
{
struct rsi_common *common = adapter->priv;
+ int status;
switch (adapter->device_model) {
case RSI_DEV_9113:
- if (rsi_load_firmware(adapter)) {
+ status = rsi_hal_prepare_fwload(adapter);
+ if (status < 0)
+ return status;
+ if (rsi_load_9113_firmware(adapter)) {
rsi_dbg(ERR_ZONE,
"%s: Failed to load TA instructions\n",
__func__);
return -EINVAL;
}
break;
+ case RSI_DEV_9116:
+ status = rsi_hal_prepare_fwload(adapter);
+ if (status < 0)
+ return status;
+ if (rsi_load_9116_firmware(adapter)) {
+ rsi_dbg(ERR_ZONE,
+ "%s: Failed to load firmware to 9116 device\n",
+ __func__);
+ return -EINVAL;
+ }
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 831046e760f8..49df3bb08d41 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common)
* @adapter: Pointer to the adapter structure.
* @band: Operating band to be set.
*
- * Return: None.
+ * Return: int - 0 on success, negative error on failure.
*/
-static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
+static int rsi_register_rates_channels(struct rsi_hw *adapter, int band)
{
struct ieee80211_supported_band *sbands = &adapter->sbands[band];
void *channels = NULL;
if (band == NL80211_BAND_2GHZ) {
- channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
- memcpy(channels,
- rsi_2ghz_channels,
- sizeof(rsi_2ghz_channels));
+ channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
sbands->band = NL80211_BAND_2GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
sbands->bitrates = rsi_rates;
sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
} else {
- channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
- memcpy(channels,
- rsi_5ghz_channels,
- sizeof(rsi_5ghz_channels));
+ channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
sbands->band = NL80211_BAND_5GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
sbands->bitrates = &rsi_rates[4];
@@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
sbands->ht_cap.mcs.rx_mask[0] = 0xff;
sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
/* sbands->ht_cap.mcs.rx_highest = 0x82; */
+ return 0;
}
static int rsi_mac80211_hw_scan_start(struct ieee80211_hw *hw,
@@ -2064,11 +2065,16 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->available_antennas_rx = 1;
wiphy->available_antennas_tx = 1;
- rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ if (status)
+ return status;
wiphy->bands[NL80211_BAND_2GHZ] =
&adapter->sbands[NL80211_BAND_2GHZ];
if (common->num_supp_bands > 1) {
- rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
+ status = rsi_register_rates_channels(adapter,
+ NL80211_BAND_5GHZ);
+ if (status)
+ return status;
wiphy->bands[NL80211_BAND_5GHZ] =
&adapter->sbands[NL80211_BAND_5GHZ];
}
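
Illustrative aside: the rsi_91x_mac80211.c hunks above swap kmalloc() plus memcpy() for kmemdup() and propagate -ENOMEM instead of returning void. A minimal sketch of that pattern, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static const int demo_channels[] = { 2412, 2437, 2462 };	/* hypothetical table */

static int demo_register_channels(void **out)
{
	void *copy;

	/* kmemdup() does the allocation and the copy in one call, with one NULL check. */
	copy = kmemdup(demo_channels, sizeof(demo_channels), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;

	*out = copy;
	return 0;
}
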
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 844f2fac298f..6c7f26ef6476 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -209,6 +209,59 @@ static struct bootup_params boot_params_40 = {
.beacon_resedue_alg_en = 0,
};
+static struct bootup_params_9116 boot_params_9116_20 = {
+ .magic_number = cpu_to_le16(LOADED_TOKEN),
+ .valid = cpu_to_le32(VALID_20),
+ .device_clk_info_9116 = {{
+ .pll_config_9116_g = {
+ .pll_ctrl_set_reg = cpu_to_le16(0xd518),
+ .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7),
+ .pll_modem_conig_reg = cpu_to_le16(0x2000),
+ .soc_clk_config_reg = cpu_to_le16(0x0c18),
+ .adc_dac_strm1_config_reg = cpu_to_le16(0x1100),
+ .adc_dac_strm2_config_reg = cpu_to_le16(0x6600),
+ },
+ .switch_clk_9116_g = {
+ .switch_clk_info =
+ cpu_to_le32((RSI_SWITCH_TASS_CLK |
+ RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG |
+ RSI_SWITCH_BBP_LMAC_CLK_REG)),
+ .tass_clock_reg = cpu_to_le32(0x083C0503),
+ .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042001),
+ .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x02010001),
+ .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b),
+ }
+ },
+ },
+};
+
+static struct bootup_params_9116 boot_params_9116_40 = {
+ .magic_number = cpu_to_le16(LOADED_TOKEN),
+ .valid = cpu_to_le32(VALID_40),
+ .device_clk_info_9116 = {{
+ .pll_config_9116_g = {
+ .pll_ctrl_set_reg = cpu_to_le16(0xd518),
+ .pll_ctrl_clr_reg = cpu_to_le16(0x2ae7),
+ .pll_modem_conig_reg = cpu_to_le16(0x3000),
+ .soc_clk_config_reg = cpu_to_le16(0x0c18),
+ .adc_dac_strm1_config_reg = cpu_to_le16(0x0000),
+ .adc_dac_strm2_config_reg = cpu_to_le16(0x6600),
+ },
+ .switch_clk_9116_g = {
+ .switch_clk_info =
+ cpu_to_le32((RSI_SWITCH_TASS_CLK |
+ RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG |
+ RSI_SWITCH_BBP_LMAC_CLK_REG |
+ RSI_MODEM_CLK_160MHZ)),
+ .tass_clock_reg = cpu_to_le32(0x083C0503),
+ .wlan_bbp_lmac_clk_reg_val = cpu_to_le32(0x01042002),
+ .zbbt_bbp_lmac_clk_reg_val = cpu_to_le32(0x04010002),
+ .bbp_lmac_clk_en_val = cpu_to_le32(0x0000003b),
+ }
+ },
+ },
+};
+
static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
/**
@@ -235,6 +288,14 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->obm_ant_sel_val = 2;
common->beacon_interval = RSI_BEACON_INTERVAL;
common->dtim_cnt = RSI_DTIM_COUNT;
+ common->w9116_features.pll_mode = 0x0;
+ common->w9116_features.rf_type = 1;
+ common->w9116_features.wireless_mode = 0;
+ common->w9116_features.enable_ppe = 0;
+ common->w9116_features.afe_type = 1;
+ common->w9116_features.dpd = 0;
+ common->w9116_features.sifs_tx_enable = 0;
+ common->w9116_features.ps_options = 0;
}
void init_bgscan_params(struct rsi_common *common)
@@ -363,6 +424,10 @@ static int rsi_load_radio_caps(struct rsi_common *common)
}
radio_caps->radio_info |= radio_id;
+ if (adapter->device_model == RSI_DEV_9116 &&
+ common->channel_width == BW_20MHZ)
+ radio_caps->radio_cfg_info &= ~0x3;
+
radio_caps->sifs_tx_11n = cpu_to_le16(SIFS_TX_11N_VALUE);
radio_caps->sifs_tx_11b = cpu_to_le16(SIFS_TX_11B_VALUE);
radio_caps->slot_rx_11n = cpu_to_le16(SHORT_SLOT_VALUE);
@@ -378,14 +443,16 @@ static int rsi_load_radio_caps(struct rsi_common *common)
}
for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
- radio_caps->qos_params[ii].cont_win_min_q =
- cpu_to_le16(common->edca_params[ii].cw_min);
- radio_caps->qos_params[ii].cont_win_max_q =
- cpu_to_le16(common->edca_params[ii].cw_max);
- radio_caps->qos_params[ii].aifsn_val_q =
- cpu_to_le16((common->edca_params[ii].aifs) << 8);
- radio_caps->qos_params[ii].txop_q =
- cpu_to_le16(common->edca_params[ii].txop);
+ if (common->edca_params[ii].cw_max > 0) {
+ radio_caps->qos_params[ii].cont_win_min_q =
+ cpu_to_le16(common->edca_params[ii].cw_min);
+ radio_caps->qos_params[ii].cont_win_max_q =
+ cpu_to_le16(common->edca_params[ii].cw_max);
+ radio_caps->qos_params[ii].aifsn_val_q =
+ cpu_to_le16(common->edca_params[ii].aifs << 8);
+ radio_caps->qos_params[ii].txop_q =
+ cpu_to_le16(common->edca_params[ii].txop);
+ }
}
radio_caps->qos_params[BROADCAST_HW_Q].txop_q = cpu_to_le16(0xffff);
@@ -893,6 +960,50 @@ static int rsi_load_bootup_params(struct rsi_common *common)
return rsi_send_internal_mgmt_frame(common, skb);
}
+static int rsi_load_9116_bootup_params(struct rsi_common *common)
+{
+ struct sk_buff *skb;
+ struct rsi_boot_params_9116 *boot_params;
+
+ rsi_dbg(MGMT_TX_ZONE, "%s: Sending boot params frame\n", __func__);
+
+ skb = dev_alloc_skb(sizeof(struct rsi_boot_params_9116));
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, sizeof(struct rsi_boot_params));
+ boot_params = (struct rsi_boot_params_9116 *)skb->data;
+
+ if (common->channel_width == BW_40MHZ) {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_9116_40,
+ sizeof(struct bootup_params_9116));
+ rsi_dbg(MGMT_TX_ZONE, "%s: Packet 40MHZ <=== %d\n", __func__,
+ UMAC_CLK_40BW);
+ boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40BW);
+ } else {
+ memcpy(&boot_params->bootup_params,
+ &boot_params_9116_20,
+ sizeof(struct bootup_params_9116));
+ if (boot_params_20.valid != cpu_to_le32(VALID_20)) {
+ boot_params->umac_clk = cpu_to_le16(UMAC_CLK_20BW);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_20BW);
+ } else {
+ boot_params->umac_clk = cpu_to_le16(UMAC_CLK_40MHZ);
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Packet 20MHZ <=== %d\n", __func__,
+ UMAC_CLK_40MHZ);
+ }
+ }
+ rsi_set_len_qno(&boot_params->desc_dword0.len_qno,
+ sizeof(struct bootup_params_9116), RSI_WIFI_MGMT_Q);
+ boot_params->desc_dword0.frame_type = BOOTUP_PARAMS_REQUEST;
+ skb_put(skb, sizeof(struct rsi_boot_params_9116));
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
/**
* rsi_send_reset_mac() - This function prepares reset MAC request and sends an
* internal management frame to indicate it to firmware.
@@ -921,6 +1032,11 @@ static int rsi_send_reset_mac(struct rsi_common *common)
mgmt_frame->desc_word[1] = cpu_to_le16(RESET_MAC_REQ);
mgmt_frame->desc_word[4] = cpu_to_le16(RETRY_COUNT << 8);
+#define RSI_9116_DEF_TA_AGGR 3
+ if (common->priv->device_model == RSI_DEV_9116)
+ mgmt_frame->desc_word[3] |=
+ cpu_to_le16(RSI_9116_DEF_TA_AGGR << 8);
+
skb_put(skb, FRAME_DESC_SZ);
return rsi_send_internal_mgmt_frame(common, skb);
@@ -971,7 +1087,10 @@ int rsi_band_check(struct rsi_common *common,
}
if (common->channel_width != prev_bw) {
- status = rsi_load_bootup_params(common);
+ if (adapter->device_model == RSI_DEV_9116)
+ status = rsi_load_9116_bootup_params(common);
+ else
+ status = rsi_load_bootup_params(common);
if (status)
return status;
@@ -1546,6 +1665,47 @@ int rsi_send_ps_request(struct rsi_hw *adapter, bool enable,
return rsi_send_internal_mgmt_frame(common, skb);
}
+static int rsi_send_w9116_features(struct rsi_common *common)
+{
+ struct rsi_wlan_9116_features *w9116_features;
+ u16 frame_len = sizeof(struct rsi_wlan_9116_features);
+ struct sk_buff *skb;
+
+ rsi_dbg(MGMT_TX_ZONE,
+ "%s: Sending wlan 9116 features\n", __func__);
+
+ skb = dev_alloc_skb(frame_len);
+ if (!skb)
+ return -ENOMEM;
+ memset(skb->data, 0, frame_len);
+
+ w9116_features = (struct rsi_wlan_9116_features *)skb->data;
+
+ w9116_features->pll_mode = common->w9116_features.pll_mode;
+ w9116_features->rf_type = common->w9116_features.rf_type;
+ w9116_features->wireless_mode = common->w9116_features.wireless_mode;
+ w9116_features->enable_ppe = common->w9116_features.enable_ppe;
+ w9116_features->afe_type = common->w9116_features.afe_type;
+ if (common->w9116_features.dpd)
+ w9116_features->feature_enable |= cpu_to_le32(RSI_DPD);
+ if (common->w9116_features.sifs_tx_enable)
+ w9116_features->feature_enable |=
+ cpu_to_le32(RSI_SIFS_TX_ENABLE);
+ if (common->w9116_features.ps_options & RSI_DUTY_CYCLING)
+ w9116_features->feature_enable |= cpu_to_le32(RSI_DUTY_CYCLING);
+ if (common->w9116_features.ps_options & RSI_END_OF_FRAME)
+ w9116_features->feature_enable |= cpu_to_le32(RSI_END_OF_FRAME);
+ w9116_features->feature_enable |=
+ cpu_to_le32((common->w9116_features.ps_options & ~0x3) << 2);
+
+ rsi_set_len_qno(&w9116_features->desc.desc_dword0.len_qno,
+ frame_len - FRAME_DESC_SZ, RSI_WIFI_MGMT_Q);
+ w9116_features->desc.desc_dword0.frame_type = FEATURES_ENABLE;
+ skb_put(skb, frame_len);
+
+ return rsi_send_internal_mgmt_frame(common, skb);
+}
+
/**
* rsi_set_antenna() - This function send antenna configuration request
* to device
@@ -1766,15 +1926,26 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
rsi_dbg(FSM_ZONE, "%s: Boot up params confirm received\n",
__func__);
if (common->fsm_state == FSM_BOOT_PARAMS_SENT) {
- adapter->eeprom.length = (IEEE80211_ADDR_LEN +
- WLAN_MAC_MAGIC_WORD_LEN +
- WLAN_HOST_MODE_LEN);
- adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR;
- if (rsi_eeprom_read(common)) {
- common->fsm_state = FSM_CARD_NOT_READY;
- goto out;
+ if (adapter->device_model == RSI_DEV_9116) {
+ common->band = NL80211_BAND_5GHZ;
+ common->num_supp_bands = 2;
+
+ if (rsi_send_reset_mac(common))
+ goto out;
+ else
+ common->fsm_state = FSM_RESET_MAC_SENT;
+ } else {
+ adapter->eeprom.length =
+ (IEEE80211_ADDR_LEN +
+ WLAN_MAC_MAGIC_WORD_LEN +
+ WLAN_HOST_MODE_LEN);
+ adapter->eeprom.offset = WLAN_MAC_EEPROM_ADDR;
+ if (rsi_eeprom_read(common)) {
+ common->fsm_state = FSM_CARD_NOT_READY;
+ goto out;
+ }
+ common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
}
- common->fsm_state = FSM_EEPROM_READ_MAC_ADDR;
} else {
rsi_dbg(INFO_ZONE,
"%s: Received bootup params cfm in %d state\n",
@@ -1853,6 +2024,12 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
case RADIO_CAPABILITIES:
if (common->fsm_state == FSM_RADIO_CAPS_SENT) {
common->rf_reset = 1;
+ if (adapter->device_model == RSI_DEV_9116 &&
+ rsi_send_w9116_features(common)) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to send 9116 features\n");
+ goto out;
+ }
if (rsi_program_bb_rf(common)) {
goto out;
} else {
@@ -1925,6 +2102,8 @@ out:
int rsi_handle_card_ready(struct rsi_common *common, u8 *msg)
{
+ int status;
+
switch (common->fsm_state) {
case FSM_CARD_NOT_READY:
rsi_dbg(INIT_ZONE, "Card ready indication from Common HAL\n");
@@ -1936,14 +2115,29 @@ int rsi_handle_card_ready(struct rsi_common *common, u8 *msg)
case FSM_COMMON_DEV_PARAMS_SENT:
rsi_dbg(INIT_ZONE, "Card ready indication from WLAN HAL\n");
+ if (common->priv->device_model == RSI_DEV_9116) {
+ if (msg[16] != MAGIC_WORD) {
+ rsi_dbg(FSM_ZONE,
+ "%s: [EEPROM_READ] Invalid token\n",
+ __func__);
+ common->fsm_state = FSM_CARD_NOT_READY;
+ return -EINVAL;
+ }
+ memcpy(common->mac_addr, &msg[20], ETH_ALEN);
+ rsi_dbg(INIT_ZONE, "MAC Addr %pM", common->mac_addr);
+ }
/* Get usb buffer status register address */
common->priv->usb_buffer_status_reg = *(u32 *)&msg[8];
rsi_dbg(INFO_ZONE, "USB buffer status register = %x\n",
common->priv->usb_buffer_status_reg);
- if (rsi_load_bootup_params(common)) {
+ if (common->priv->device_model == RSI_DEV_9116)
+ status = rsi_load_9116_bootup_params(common);
+ else
+ status = rsi_load_bootup_params(common);
+ if (status < 0) {
common->fsm_state = FSM_CARD_NOT_READY;
- return -EINVAL;
+ return status;
}
common->fsm_state = FSM_BOOT_PARAMS_SENT;
break;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 3430d7a0899e..f9c67ed473d1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -923,6 +923,70 @@ static int rsi_sdio_reinit_device(struct rsi_hw *adapter)
return 0;
}
+static int rsi_sdio_ta_reset(struct rsi_hw *adapter)
+{
+ int status;
+ u32 addr;
+ u8 *data;
+
+ status = rsi_sdio_master_access_msword(adapter, TA_BASE_ADDR);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Unable to set ms word to common reg\n");
+ return status;
+ }
+
+ rsi_dbg(INIT_ZONE, "%s: Bring TA out of reset\n", __func__);
+ put_unaligned_le32(TA_HOLD_THREAD_VALUE, data);
+ addr = TA_HOLD_THREAD_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to hold TA threads\n");
+ return status;
+ }
+
+ put_unaligned_le32(TA_SOFT_RST_CLR, data);
+ addr = TA_SOFT_RESET_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to get TA out of reset\n");
+ return status;
+ }
+
+ put_unaligned_le32(TA_PC_ZERO, data);
+ addr = TA_TH0_PC_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to Reset TA PC value\n");
+ return -EINVAL;
+ }
+
+ put_unaligned_le32(TA_RELEASE_THREAD_VALUE, data);
+ addr = TA_RELEASE_THREAD_REG | RSI_SD_REQUEST_MASTER;
+ status = rsi_sdio_write_register_multiple(adapter, addr,
+ (u8 *)&data,
+ RSI_9116_REG_SIZE);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to release TA threads\n");
+ return status;
+ }
+
+ status = rsi_sdio_master_access_msword(adapter, MISC_CFG_BASE_ADDR);
+ if (status < 0) {
+ rsi_dbg(ERR_ZONE, "Unable to set ms word to common reg\n");
+ return status;
+ }
+ rsi_dbg(INIT_ZONE, "***** TA Reset done *****\n");
+
+ return 0;
+}
+
static struct rsi_host_intf_ops sdio_host_intf_ops = {
.write_pkt = rsi_sdio_host_intf_write_pkt,
.read_pkt = rsi_sdio_host_intf_read_pkt,
@@ -933,6 +997,7 @@ static struct rsi_host_intf_ops sdio_host_intf_ops = {
.master_reg_write = rsi_sdio_master_reg_write,
.load_data_master_write = rsi_sdio_load_data_master_write,
.reinit_device = rsi_sdio_reinit_device,
+ .ta_reset = rsi_sdio_ta_reset,
};
/**
@@ -949,7 +1014,7 @@ static int rsi_probe(struct sdio_func *pfunction,
{
struct rsi_hw *adapter;
struct rsi_91x_sdiodev *sdev;
- int status;
+ int status = -EINVAL;
rsi_dbg(INIT_ZONE, "%s: Init function called\n", __func__);
@@ -968,6 +1033,20 @@ static int rsi_probe(struct sdio_func *pfunction,
status = -EIO;
goto fail_free_adapter;
}
+
+ if (pfunction->device == RSI_SDIO_PID_9113) {
+ rsi_dbg(ERR_ZONE, "%s: 9113 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9113;
+ } else if (pfunction->device == RSI_SDIO_PID_9116) {
+ rsi_dbg(ERR_ZONE, "%s: 9116 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9116;
+ } else {
+ rsi_dbg(ERR_ZONE,
+ "%s: Unsupported RSI device id 0x%x\n", __func__,
+ pfunction->device);
+ goto fail_free_adapter;
+ }
+
sdev = (struct rsi_91x_sdiodev *)adapter->rsi_dev;
rsi_init_event(&sdev->rx_thread.event);
status = rsi_create_kthread(adapter->priv, &sdev->rx_thread,
@@ -1088,16 +1167,41 @@ static void rsi_reset_chip(struct rsi_hw *adapter)
* and any pending dma transfers to rf spi in device to finish.
*/
msleep(100);
-
- ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0, 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1, RSI_ULP_WRITE_50,
- 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2, RSI_ULP_WRITE_0,
- 32);
- ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
- RSI_ULP_TIMER_ENABLE, 32);
+ if (adapter->device_model != RSI_DEV_9116) {
+ ulp_read_write(adapter, RSI_ULP_RESET_REG, RSI_ULP_WRITE_0, 32);
+ ulp_read_write(adapter,
+ RSI_WATCH_DOG_TIMER_1, RSI_ULP_WRITE_2, 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2, RSI_ULP_WRITE_0,
+ 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
+ RSI_ULP_WRITE_50, 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
+ RSI_ULP_WRITE_0, 32);
+ ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
+ RSI_ULP_TIMER_ENABLE, 32);
+ } else {
+ if ((rsi_sdio_master_reg_write(adapter,
+ NWP_WWD_INTERRUPT_TIMER,
+ NWP_WWD_INT_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ rsi_dbg(ERR_ZONE, "Failed to write to intr timer\n");
+ }
+ if ((rsi_sdio_master_reg_write(adapter,
+ NWP_WWD_SYSTEM_RESET_TIMER,
+ NWP_WWD_SYS_RESET_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to write to system reset timer\n");
+ }
+ if ((rsi_sdio_master_reg_write(adapter,
+ NWP_WWD_MODE_AND_RSTART,
+ NWP_WWD_TIMER_DISABLE,
+ RSI_9116_REG_SIZE)) < 0) {
+ rsi_dbg(ERR_ZONE,
+ "Failed to write to mode and restart\n");
+ }
+ rsi_dbg(ERR_ZONE, "***** Watch Dog Reset Successful *****\n");
+ }
/* This msleep will be sufficient for the ulp
* read write operations to complete for chip reset.
*/
@@ -1415,7 +1519,8 @@ static const struct dev_pm_ops rsi_pm_ops = {
#endif
static const struct sdio_device_id rsi_dev_table[] = {
- { SDIO_DEVICE(RSI_SDIO_VID_9113, RSI_SDIO_PID_9113) },
+ { SDIO_DEVICE(RSI_SDIO_VENDOR_ID, RSI_SDIO_PID_9113) },
+ { SDIO_DEVICE(RSI_SDIO_VENDOR_ID, RSI_SDIO_PID_9116) },
{ /* Blank */},
};
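
Illustrative aside on the register helpers used above: rsi_sdio_ta_reset() encodes 32-bit values with put_unaligned_le32() before handing them to the multi-byte SDIO write. A minimal sketch of the helper, assuming a caller-provided 4-byte buffer and hypothetical names:

#include <asm/unaligned.h>
#include <linux/types.h>

static void demo_encode_reg(u8 buf[4], u32 val)
{
	/*
	 * put_unaligned_le32() stores val least-significant byte first,
	 * independent of host endianness and of the buffer's alignment,
	 * so the bus sees the same byte order on every architecture.
	 */
	put_unaligned_le32(val, buf);
}
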
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index ac0ef5ea6ffb..f5048d4b8cb6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -213,7 +213,7 @@ static int rsi_usb_reg_read(struct usb_device *usbdev,
*/
static int rsi_usb_reg_write(struct usb_device *usbdev,
u32 reg,
- u16 value,
+ u32 value,
u16 len)
{
u8 *usb_reg_buf;
@@ -226,17 +226,17 @@ static int rsi_usb_reg_write(struct usb_device *usbdev,
if (!usb_reg_buf)
return status;
- usb_reg_buf[0] = (value & 0x00ff);
- usb_reg_buf[1] = (value & 0xff00) >> 8;
- usb_reg_buf[2] = 0x0;
- usb_reg_buf[3] = 0x0;
+ usb_reg_buf[0] = (cpu_to_le32(value) & 0x00ff);
+ usb_reg_buf[1] = (cpu_to_le32(value) & 0xff00) >> 8;
+ usb_reg_buf[2] = (cpu_to_le32(value) & 0x00ff0000) >> 16;
+ usb_reg_buf[3] = (cpu_to_le32(value) & 0xff000000) >> 24;
status = usb_control_msg(usbdev,
usb_sndctrlpipe(usbdev, 0),
USB_VENDOR_REGISTER_WRITE,
RSI_USB_REQ_OUT,
- ((reg & 0xffff0000) >> 16),
- (reg & 0xffff),
+ ((cpu_to_le32(reg) & 0xffff0000) >> 16),
+ (cpu_to_le32(reg) & 0xffff),
(void *)usb_reg_buf,
len,
USB_CTRL_SET_TIMEOUT);
@@ -263,8 +263,10 @@ static void rsi_rx_done_handler(struct urb *urb)
struct rsi_91x_usbdev *dev = (struct rsi_91x_usbdev *)rx_cb->data;
int status = -EINVAL;
- if (urb->status)
- goto out;
+ if (urb->status) {
+ dev_kfree_skb(rx_cb->rx_skb);
+ return;
+ }
if (urb->actual_length <= 0 ||
urb->actual_length > rx_cb->rx_skb->len) {
@@ -698,26 +700,47 @@ static int rsi_reset_card(struct rsi_hw *adapter)
goto fail;
}
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
- RSI_ULP_WRITE_2, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2,
- RSI_ULP_WRITE_0, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
- RSI_ULP_WRITE_50, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
- RSI_ULP_WRITE_0, 32);
- if (ret < 0)
- goto fail;
- ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
- RSI_ULP_TIMER_ENABLE, 32);
- if (ret < 0)
- goto fail;
+ if (adapter->device_model != RSI_DEV_9116) {
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_1,
+ RSI_ULP_WRITE_2, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_2,
+ RSI_ULP_WRITE_0, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_1,
+ RSI_ULP_WRITE_50, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_DELAY_TIMER_2,
+ RSI_ULP_WRITE_0, 32);
+ if (ret < 0)
+ goto fail;
+ ret = usb_ulp_read_write(adapter, RSI_WATCH_DOG_TIMER_ENABLE,
+ RSI_ULP_TIMER_ENABLE, 32);
+ if (ret < 0)
+ goto fail;
+ } else {
+ if ((rsi_usb_master_reg_write(adapter,
+ NWP_WWD_INTERRUPT_TIMER,
+ NWP_WWD_INT_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ goto fail;
+ }
+ if ((rsi_usb_master_reg_write(adapter,
+ NWP_WWD_SYSTEM_RESET_TIMER,
+ NWP_WWD_SYS_RESET_TIMER_CLKS,
+ RSI_9116_REG_SIZE)) < 0) {
+ goto fail;
+ }
+ if ((rsi_usb_master_reg_write(adapter,
+ NWP_WWD_MODE_AND_RSTART,
+ NWP_WWD_TIMER_DISABLE,
+ RSI_9116_REG_SIZE)) < 0) {
+ goto fail;
+ }
+ }
rsi_dbg(INFO_ZONE, "Reset card done\n");
return ret;
@@ -763,6 +786,18 @@ static int rsi_probe(struct usb_interface *pfunction,
rsi_dbg(ERR_ZONE, "%s: Initialized os intf ops\n", __func__);
+ if (id && id->idProduct == RSI_USB_PID_9113) {
+ rsi_dbg(INIT_ZONE, "%s: 9113 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9113;
+ } else if (id && id->idProduct == RSI_USB_PID_9116) {
+ rsi_dbg(INIT_ZONE, "%s: 9116 module detected\n", __func__);
+ adapter->device_model = RSI_DEV_9116;
+ } else {
+ rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
+ __func__, id->idProduct);
+ goto err1;
+ }
+
dev = (struct rsi_91x_usbdev *)adapter->rsi_dev;
status = rsi_usb_reg_read(dev->usbdev, FW_STATUS_REG, &fw_status, 2);
@@ -845,7 +880,8 @@ static int rsi_resume(struct usb_interface *intf)
#endif
static const struct usb_device_id rsi_dev_table[] = {
- { USB_DEVICE(RSI_USB_VID_9113, RSI_USB_PID_9113) },
+ { USB_DEVICE(RSI_USB_VENDOR_ID, RSI_USB_PID_9113) },
+ { USB_DEVICE(RSI_USB_VENDOR_ID, RSI_USB_PID_9116) },
{ /* Blank */},
};
diff --git a/drivers/net/wireless/rsi/rsi_boot_params.h b/drivers/net/wireless/rsi/rsi_boot_params.h
index ad903b22440e..c1cf19d1e376 100644
--- a/drivers/net/wireless/rsi/rsi_boot_params.h
+++ b/drivers/net/wireless/rsi/rsi_boot_params.h
@@ -80,6 +80,15 @@ struct pll_config {
struct afepll_info afepll_info_g;
} __packed;
+struct pll_config_9116 {
+ __le16 pll_ctrl_set_reg;
+ __le16 pll_ctrl_clr_reg;
+ __le16 pll_modem_conig_reg;
+ __le16 soc_clk_config_reg;
+ __le16 adc_dac_strm1_config_reg;
+ __le16 adc_dac_strm2_config_reg;
+} __packed;
+
/* structure to store configs related to UMAC clk programming */
struct switch_clk {
__le16 switch_clk_info;
@@ -93,11 +102,32 @@ struct switch_clk {
__le16 qspi_uart_clock_reg_config;
} __packed;
+#define RSI_SWITCH_TASS_CLK BIT(0)
+#define RSI_SWITCH_QSPI_CLK BIT(1)
+#define RSI_SWITCH_SLP_CLK_2_32 BIT(2)
+#define RSI_SWITCH_WLAN_BBP_LMAC_CLK_REG BIT(3)
+#define RSI_SWITCH_ZBBT_BBP_LMAC_CLK_REG BIT(4)
+#define RSI_SWITCH_BBP_LMAC_CLK_REG BIT(5)
+#define RSI_MODEM_CLK_160MHZ BIT(6)
+
+struct switch_clk_9116 {
+ __le32 switch_clk_info;
+ __le32 tass_clock_reg;
+ __le32 wlan_bbp_lmac_clk_reg_val;
+ __le32 zbbt_bbp_lmac_clk_reg_val;
+ __le32 bbp_lmac_clk_en_val;
+} __packed;
+
struct device_clk_info {
struct pll_config pll_config_g;
struct switch_clk switch_clk_g;
} __packed;
+struct device_clk_info_9116 {
+ struct pll_config_9116 pll_config_9116_g;
+ struct switch_clk_9116 switch_clk_9116_g;
+} __packed;
+
struct bootup_params {
__le16 magic_number;
__le16 crystal_good_time;
@@ -127,4 +157,37 @@ struct bootup_params {
__le32 max_threshold_to_avoid_sleep;
u8 beacon_resedue_alg_en;
} __packed;
+
+struct bootup_params_9116 {
+ __le16 magic_number;
+#define LOADED_TOKEN 0x5AA5 /* Bootup params are installed by host
+ * or OTP/FLASH (Bootloader)
+ */
+#define ROM_TOKEN 0x55AA /* Bootup params are taken from ROM
+ * itself in MCU mode.
+ */
+ __le16 crystal_good_time;
+ __le32 valid;
+ __le32 reserved_for_valids;
+ __le16 bootup_mode_info;
+#define BT_COEXIST BIT(0)
+#define BOOTUP_MODE (BIT(2) | BIT(1))
+#define CUR_DEV_MODE_9116 (bootup_params_9116.bootup_mode_info >> 1)
+ __le16 digital_loop_back_params;
+ __le16 rtls_timestamp_en;
+ __le16 host_spi_intr_cfg;
+ struct device_clk_info_9116 device_clk_info_9116[1];
+ __le32 buckboost_wakeup_cnt;
+ __le16 pmu_wakeup_wait;
+ u8 shutdown_wait_time;
+ u8 pmu_slp_clkout_sel;
+ __le32 wdt_prog_value;
+ __le32 wdt_soc_rst_delay;
+ __le32 dcdc_operation_mode;
+ __le32 soc_reset_wait_cnt;
+ __le32 waiting_time_at_fresh_sleep;
+ __le32 max_threshold_to_avoid_sleep;
+ u8 beacon_resedue_alg_en;
+} __packed;
+
#endif
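
Illustrative aside on the layout above: bootup_params_9116 is consumed by firmware, so every multi-byte field is a fixed-width little-endian type and the structure is __packed. A minimal sketch of that idiom with a hypothetical two-field structure and made-up values:

#include <asm/byteorder.h>
#include <linux/bits.h>
#include <linux/types.h>

struct demo_fw_params {
	__le16 magic;	/* stored little-endian regardless of host CPU */
	__le32 valid;
} __packed;		/* no compiler padding, so the wire layout is fixed */

static const struct demo_fw_params demo_params = {
	.magic	= cpu_to_le16(0x5aa5),
	.valid	= cpu_to_le32(BIT(0)),
};
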
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 327638cdd30b..46e36df9e8e3 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -70,6 +70,21 @@
#define RSI_WATCH_DOG_DELAY_TIMER_2 0x16f
#define RSI_WATCH_DOG_TIMER_ENABLE 0x170
+/* Watchdog timer addresses for 9116 */
+#define NWP_AHB_BASE_ADDR 0x41300000
+#define NWP_WWD_INTERRUPT_TIMER (NWP_AHB_BASE_ADDR + 0x300)
+#define NWP_WWD_SYSTEM_RESET_TIMER (NWP_AHB_BASE_ADDR + 0x304)
+#define NWP_WWD_WINDOW_TIMER (NWP_AHB_BASE_ADDR + 0x308)
+#define NWP_WWD_TIMER_SETTINGS (NWP_AHB_BASE_ADDR + 0x30C)
+#define NWP_WWD_MODE_AND_RSTART (NWP_AHB_BASE_ADDR + 0x310)
+#define NWP_WWD_RESET_BYPASS (NWP_AHB_BASE_ADDR + 0x314)
+#define NWP_FSM_INTR_MASK_REG (NWP_AHB_BASE_ADDR + 0x104)
+
+/* Watchdog timer values */
+#define NWP_WWD_INT_TIMER_CLKS 5
+#define NWP_WWD_SYS_RESET_TIMER_CLKS 4
+#define NWP_WWD_TIMER_DISABLE 0xAA0001
+
#define RSI_ULP_WRITE_0 00
#define RSI_ULP_WRITE_2 02
#define RSI_ULP_WRITE_50 50
@@ -113,9 +128,18 @@
#define BBP_INFO_40MHZ 0x6
#define FW_FLASH_OFFSET 0x820
-#define LMAC_VER_OFFSET (FW_FLASH_OFFSET + 0x200)
+#define LMAC_VER_OFFSET_9113 (FW_FLASH_OFFSET + 0x200)
+#define LMAC_VER_OFFSET_9116 0x22C2
#define MAX_DWORD_ALIGN_BYTES 64
#define RSI_COMMON_REG_SIZE 2
+#define RSI_9116_REG_SIZE 4
+#define FW_ALIGN_SIZE 4
+#define RSI_9116_FW_MAGIC_WORD 0x5aa5
+
+#define MEM_ACCESS_CTRL_FROM_HOST 0x41300000
+#define RAM_384K_ACCESS_FROM_TA (BIT(2) | BIT(3) | BIT(4) | BIT(5) | \
+ BIT(20) | BIT(21) | BIT(22) | \
+ BIT(23) | BIT(24) | BIT(25))
struct bl_header {
__le32 flags;
@@ -130,6 +154,24 @@ struct ta_metadata {
unsigned int address;
};
+#define RSI_BL_CTRL_LEN_MASK 0xFFFFFF
+#define RSI_BL_CTRL_SPI_32BIT_MODE BIT(27)
+#define RSI_BL_CTRL_REL_TA_SOFTRESET BIT(28)
+#define RSI_BL_CTRL_START_FROM_ROM_PC BIT(29)
+#define RSI_BL_CTRL_SPI_8BIT_MODE BIT(30)
+#define RSI_BL_CTRL_LAST_ENTRY BIT(31)
+struct bootload_entry {
+ __le32 control;
+ __le32 dst_addr;
+} __packed;
+
+struct bootload_ds {
+ __le16 fixed_pattern;
+ __le16 offset;
+ __le32 reserved;
+ struct bootload_entry bl_entry[7];
+} __packed;
+
struct rsi_mgmt_desc {
__le16 len_qno;
u8 frame_type;
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 35d13f35e9b0..73a19e43106b 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -111,9 +111,13 @@ extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
#define RSI_WOW_ENABLED BIT(0)
#define RSI_WOW_NO_CONNECTION BIT(1)
-#define RSI_DEV_9113 1
#define RSI_MAX_RX_PKTS 64
+enum rsi_dev_model {
+ RSI_DEV_9113 = 0,
+ RSI_DEV_9116
+};
+
struct version_info {
u16 major;
u16 minor;
@@ -215,6 +219,17 @@ enum rsi_dfs_regions {
RSI_REGION_WORLD
};
+struct rsi_9116_features {
+ u8 pll_mode;
+ u8 rf_type;
+ u8 wireless_mode;
+ u8 afe_type;
+ u8 enable_ppe;
+ u8 dpd;
+ u32 sifs_tx_enable;
+ u32 ps_options;
+};
+
struct rsi_common {
struct rsi_hw *priv;
struct vif_priv vif_info[RSI_MAX_VIFS];
@@ -310,6 +325,7 @@ struct rsi_common {
struct cfg80211_scan_request *hwscan;
struct rsi_bgscan_params bgscan;
+ struct rsi_9116_features w9116_features;
u8 bgscan_en;
u8 mac_ops_resumed;
};
@@ -329,7 +345,7 @@ struct eeprom_read {
struct rsi_hw {
struct rsi_common *priv;
- u8 device_model;
+ enum rsi_dev_model device_model;
struct ieee80211_hw *hw;
struct ieee80211_vif *vifs[RSI_MAX_VIFS];
struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
@@ -381,6 +397,7 @@ struct rsi_host_intf_ops {
u32 instructions_size, u16 block_size,
u8 *fw);
int (*reinit_device)(struct rsi_hw *adapter);
+ int (*ta_reset)(struct rsi_hw *adapter);
};
enum rsi_host_intf rsi_get_host_intf(void *priv);
diff --git a/drivers/net/wireless/rsi/rsi_mgmt.h b/drivers/net/wireless/rsi/rsi_mgmt.h
index ea83faa15c7e..2ce2dcf57441 100644
--- a/drivers/net/wireless/rsi/rsi_mgmt.h
+++ b/drivers/net/wireless/rsi/rsi_mgmt.h
@@ -294,6 +294,7 @@ enum cmd_frame_type {
COMMON_DEV_CONFIG = 0x28,
RADIO_PARAMS_UPDATE = 0x29,
WOWLAN_CONFIG_PARAMS = 0x2B,
+ FEATURES_ENABLE = 0x33,
WOWLAN_WAKEUP_REASON = 0xc5
};
@@ -351,6 +352,15 @@ struct rsi_boot_params {
struct bootup_params bootup_params;
} __packed;
+struct rsi_boot_params_9116 {
+ struct rsi_cmd_desc_dword0 desc_dword0;
+ struct rsi_cmd_desc_dword1 desc_dword1;
+ struct rsi_cmd_desc_dword2 desc_dword2;
+ __le16 reserved;
+ __le16 umac_clk;
+ struct bootup_params_9116 bootup_params;
+} __packed;
+
struct rsi_peer_notify {
struct rsi_cmd_desc desc;
u8 mac_addr[6];
@@ -654,6 +664,22 @@ struct rsi_bgscan_probe {
__le16 probe_req_length;
} __packed;
+#define RSI_DUTY_CYCLING BIT(0)
+#define RSI_END_OF_FRAME BIT(1)
+#define RSI_SIFS_TX_ENABLE BIT(2)
+#define RSI_DPD BIT(3)
+struct rsi_wlan_9116_features {
+ struct rsi_cmd_desc desc;
+ u8 pll_mode;
+ u8 rf_type;
+ u8 wireless_mode;
+ u8 enable_ppe;
+ u8 afe_type;
+ u8 reserved1;
+ __le16 reserved2;
+ __le32 feature_enable;
+};
+
static inline u32 rsi_get_queueno(u8 *addr, u16 offset)
{
return (le16_to_cpu(*(__le16 *)&addr[offset]) & 0x7000) >> 12;
diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h
index 66dcd2ec9051..c5cfb6238f73 100644
--- a/drivers/net/wireless/rsi/rsi_sdio.h
+++ b/drivers/net/wireless/rsi/rsi_sdio.h
@@ -28,8 +28,9 @@
#include <linux/mmc/sdio_ids.h>
#include "rsi_main.h"
-#define RSI_SDIO_VID_9113 0x041B
+#define RSI_SDIO_VENDOR_ID 0x041B
#define RSI_SDIO_PID_9113 0x9330
+#define RSI_SDIO_PID_9116 0x9116
enum sdio_interrupt_type {
BUFFER_FULL = 0x0,
@@ -91,7 +92,7 @@ enum sdio_interrupt_type {
#define TA_SOFT_RST_SET BIT(0)
#define TA_PC_ZERO 0
#define TA_HOLD_THREAD_VALUE 0xF
-#define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF)
+#define TA_RELEASE_THREAD_VALUE 0xF
#define TA_BASE_ADDR 0x2200
#define MISC_CFG_BASE_ADDR 0x4105
diff --git a/drivers/net/wireless/rsi/rsi_usb.h b/drivers/net/wireless/rsi/rsi_usb.h
index 5b2eddd1a2ee..8702f434b569 100644
--- a/drivers/net/wireless/rsi/rsi_usb.h
+++ b/drivers/net/wireless/rsi/rsi_usb.h
@@ -22,8 +22,9 @@
#include "rsi_main.h"
#include "rsi_common.h"
-#define RSI_USB_VID_9113 0x1618
+#define RSI_USB_VENDOR_ID 0x1618
#define RSI_USB_PID_9113 0x9113
+#define RSI_USB_PID_9116 0x9116
#define USB_INTERNAL_REG_1 0x25000
#define RSI_USB_READY_MAGIC_NUM 0xab
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 90dc979f260b..c1608f0bf6d0 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
mutex_init(&priv->wsm_cmd_mux);
mutex_init(&priv->conf_mutex);
priv->workqueue = create_singlethread_workqueue("cw1200_wq");
+ if (!priv->workqueue) {
+ ieee80211_free_hw(hw);
+ return NULL;
+ }
+
sema_init(&priv->scan.lock, 1);
INIT_WORK(&priv->scan.work, cw1200_scan_work);
INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 348be0aed97e..0415a064f6e2 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1700,14 +1700,14 @@ void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
- set_bit(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
+ __set_bit_le(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
{
struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
int ret = 0, i, b, ch_bit_idx;
- u32 tmp_ch_bitmap[2];
+ __le32 tmp_ch_bitmap[2] __aligned(sizeof(unsigned long));
struct wiphy *wiphy = wl->hw->wiphy;
struct ieee80211_supported_band *band;
bool timeout = false;
@@ -1717,7 +1717,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
wl1271_debug(DEBUG_CMD, "cmd reg domain config");
- memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
+ memcpy(tmp_ch_bitmap, wl->reg_ch_conf_pending, sizeof(tmp_ch_bitmap));
for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
band = wiphy->bands[b];
@@ -1738,13 +1738,10 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
if (ch_bit_idx < 0)
continue;
- set_bit(ch_bit_idx, (long *)tmp_ch_bitmap);
+ __set_bit_le(ch_bit_idx, (long *)tmp_ch_bitmap);
}
}
- tmp_ch_bitmap[0] |= wl->reg_ch_conf_pending[0];
- tmp_ch_bitmap[1] |= wl->reg_ch_conf_pending[1];
-
if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap)))
goto out;
@@ -1754,8 +1751,8 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
goto out;
}
- cmd->ch_bit_map1 = cpu_to_le32(tmp_ch_bitmap[0]);
- cmd->ch_bit_map2 = cpu_to_le32(tmp_ch_bitmap[1]);
+ cmd->ch_bit_map1 = tmp_ch_bitmap[0];
+ cmd->ch_bit_map2 = tmp_ch_bitmap[1];
cmd->dfs_region = wl->dfs_region;
wl1271_debug(DEBUG_CMD,
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index dcb2c8b0feb6..affefaaea1ea 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -372,8 +372,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 nla_cmd;
int err;
- err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy,
- NULL);
+ err = nla_parse_deprecated(tb, WL1271_TM_ATTR_MAX, data, len,
+ wl1271_tm_policy, NULL);
if (err)
return err;
diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
index 7f34ec077ee5..75756fb8e7b0 100644
--- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c
+++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c
@@ -41,8 +41,8 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
if (!data)
return -EINVAL;
- ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
- wlcore_vendor_attr_policy, NULL);
+ ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
+ wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
@@ -122,8 +122,8 @@ wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
if (!data)
return -EINVAL;
- ret = nla_parse(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
- wlcore_vendor_attr_policy, NULL);
+ ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
+ wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index dd14850b0603..870eea3e7a27 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -320,9 +320,9 @@ struct wl1271 {
bool watchdog_recovery;
/* Reg domain last configuration */
- u32 reg_ch_conf_last[2] __aligned(8);
+ DECLARE_BITMAP(reg_ch_conf_last, 64);
/* Reg domain pending configuration */
- u32 reg_ch_conf_pending[2];
+ DECLARE_BITMAP(reg_ch_conf_pending, 64);
/* Pointer that holds DMA-friendly block for the mailbox */
void *mbox;
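
Illustrative aside on the wlcore conversion above: the regulatory channel words move from a u32[2] filled with set_bit() to DECLARE_BITMAP() filled with __set_bit_le(), so the in-memory bit layout no longer depends on host endianness and the words can be copied into the __le32 command fields as-is. A minimal sketch of the pattern with hypothetical names:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_reg_state {
	DECLARE_BITMAP(pending, 64);	/* 64 channel bits, little-endian bit order */
};

static void demo_mark_channel(struct demo_reg_state *st, unsigned int ch_bit)
{
	/* Bit 0 always lands in the lowest-addressed byte, on any host. */
	__set_bit_le(ch_bit, st->pending);
}

static void demo_fill_cmd(struct demo_reg_state *st, __le32 words[2])
{
	/* The bitmap is already in wire order; copy it out verbatim. */
	memcpy(words, st->pending, 2 * sizeof(__le32));
}
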
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index c2cda3acd4af..a094d5b3383c 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1917,8 +1917,7 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
if (!urb)
return -ENOMEM;
- req_len = sizeof(struct usb_req_write_regs) +
- count * sizeof(struct reg_data);
+ req_len = struct_size(req, reg_writes, count);
req = kmalloc(req_len, GFP_KERNEL);
if (!req) {
r = -ENOMEM;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 936c0b3e0ba2..05847eb91a1b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -248,6 +248,22 @@ struct xenvif_hash {
struct xenvif_hash_cache cache;
};
+struct backend_info {
+ struct xenbus_device *dev;
+ struct xenvif *vif;
+
+ /* This is the state that will be reflected in xenstore when any
+ * active hotplug script completes.
+ */
+ enum xenbus_state state;
+
+ enum xenbus_state frontend_state;
+ struct xenbus_watch hotplug_status_watch;
+ u8 have_hotplug_status_watch:1;
+
+ const char *hotplug_script;
+};
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -283,6 +299,8 @@ struct xenvif {
struct xenbus_watch credit_watch;
struct xenbus_watch mcast_ctrl_watch;
+ struct backend_info *be;
+
spinlock_t lock;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 6da12518e693..783198844dd7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+ return netdev_pick_tx(dev, skb, NULL) %
+ dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
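
Illustrative aside on the xen-netback change above (the xen-netfront hunk below does the same): ndo_select_queue() loses the select_queue_fallback_t argument, and drivers call netdev_pick_tx() directly when they want the stack's default pick. A minimal sketch of the updated callback shape, with a hypothetical driver name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/*
	 * netdev_pick_tx() replaces the old fallback pointer: it runs the
	 * core's XPS/hash based selection, and the driver only clamps the
	 * result to its real queue count.
	 */
	return netdev_pick_tx(dev, skb, sb_dev) % dev->real_num_tx_queues;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_select_queue = demo_select_queue,
};
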
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 330ddb64930f..41c9e8f2e520 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -22,22 +22,6 @@
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
-struct backend_info {
- struct xenbus_device *dev;
- struct xenvif *vif;
-
- /* This is the state that will be reflected in xenstore when any
- * active hotplug script completes.
- */
- enum xenbus_state state;
-
- enum xenbus_state frontend_state;
- struct xenbus_watch hotplug_status_watch;
- u8 have_hotplug_status_watch:1;
-
- const char *hotplug_script;
-};
-
static int connect_data_rings(struct backend_info *be,
struct xenvif_queue *queue);
static void connect(struct backend_info *be);
@@ -472,6 +456,7 @@ static int backend_create_xenvif(struct backend_info *be)
return err;
}
be->vif = vif;
+ vif->be = be;
kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
return 0;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c914c24f880b..8d33970a2950 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;
@@ -2038,7 +2037,7 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's CLOSING state -- fallthrough */
+ /* Fall through - Missed the backend's CLOSING state. */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 01acb6e53365..99727a2edda0 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -781,9 +781,7 @@ static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext)
int result = 0;
int res_len;
static bool wtx;
- struct device *dev;
struct device *spidevice;
- struct nfc_digital_dev *nfcddev;
struct sk_buff *skb_resp;
struct st95hf_context *stcontext =
(struct st95hf_context *)st95hfcontext;
@@ -828,8 +826,6 @@ static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext)
goto end;
}
- dev = &stcontext->nfcdev->dev;
- nfcddev = stcontext->ddev;
if (skb_resp->data[2] == WTX_REQ_FROM_TAG) {
/* Request for new FWT from tag */
result = st95hf_handle_wtx(stcontext, true, skb_resp->data[3]);
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index d820f3edd431..9649cd53e955 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -8,8 +8,10 @@
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/of_net.h>
+#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/export.h>
+#include <linux/device.h>
/**
* of_get_phy_mode - Get phy mode for given device_node
@@ -47,12 +49,52 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
return NULL;
}
+static const void *of_get_mac_addr_nvmem(struct device_node *np)
+{
+ int ret;
+ u8 mac[ETH_ALEN];
+ struct property *pp;
+ struct platform_device *pdev = of_find_device_by_node(np);
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ret = nvmem_get_mac_address(&pdev->dev, &mac);
+ if (ret)
+ return ERR_PTR(ret);
+
+ pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
+ if (!pp)
+ return ERR_PTR(-ENOMEM);
+
+ pp->name = "nvmem-mac-address";
+ pp->length = ETH_ALEN;
+ pp->value = devm_kmemdup(&pdev->dev, mac, ETH_ALEN, GFP_KERNEL);
+ if (!pp->value) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = of_add_property(np, pp);
+ if (ret)
+ goto free;
+
+ return pp->value;
+free:
+ devm_kfree(&pdev->dev, pp->value);
+ devm_kfree(&pdev->dev, pp);
+
+ return ERR_PTR(ret);
+}
+
/**
* Search the device tree for the best MAC address to use. 'mac-address' is
 * checked first, because that is supposed to contain the "most recent" MAC
* address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
+ * because that is the default address. If that isn't set, then the obsolete
+ * 'address' is checked, just in case we're using an old device tree. If none
+ * of these is set, the MAC address is read from the nvmem cell named
+ * 'mac-address'.
*
* Note that the 'address' property is supposed to contain a virtual address of
* the register set, but some DTS files have redefined that property to be the
@@ -64,6 +106,8 @@ static const void *of_get_mac_addr(struct device_node *np, const char *name)
* addresses. Some older U-Boots only initialized 'local-mac-address'. In
* this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
* but is all zeros.
+ *
+ * Return: Will be a valid pointer on success and ERR_PTR in case of error.
*/
const void *of_get_mac_address(struct device_node *np)
{
@@ -77,6 +121,10 @@ const void *of_get_mac_address(struct device_node *np)
if (addr)
return addr;
- return of_get_mac_addr(np, "address");
+ addr = of_get_mac_addr(np, "address");
+ if (addr)
+ return addr;
+
+ return of_get_mac_addr_nvmem(np);
}
EXPORT_SYMBOL(of_get_mac_address);
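
Illustrative aside on the of_net.c change above: with the nvmem fallback, of_get_mac_address() returns either a valid pointer or an ERR_PTR() (possibly -EPROBE_DEFER while the nvmem cell is not yet available), so callers are expected to test with IS_ERR() rather than for NULL. A minimal sketch of a consumer, with hypothetical names:

#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

static int demo_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *addr = of_get_mac_address(np);

	if (IS_ERR(addr))
		return PTR_ERR(addr);	/* may be -EPROBE_DEFER */

	if (is_valid_ether_addr(addr))
		ether_addr_copy(ndev->dev_addr, addr);
	else
		eth_hw_addr_random(ndev);

	return 0;
}
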
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index c851cf6e01c4..784a2e76a1b0 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -10,6 +10,7 @@
#ifndef __QETH_CORE_H__
#define __QETH_CORE_H__
+#include <linux/completion.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
@@ -21,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/ipv6.h>
@@ -163,6 +165,12 @@ struct qeth_vnicc_info {
bool rx_bcast_enabled;
};
+static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
+ enum qeth_ipa_setadp_cmd func)
+{
+ return (ipa->supported_funcs & func);
+}
+
static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
enum qeth_ipa_funcs func)
{
@@ -176,9 +184,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
}
#define qeth_adp_supported(c, f) \
- qeth_is_ipa_supported(&c->options.adp, f)
-#define qeth_adp_enabled(c, f) \
- qeth_is_ipa_enabled(&c->options.adp, f)
+ qeth_is_adp_supported(&c->options.adp, f)
#define qeth_is_supported(c, f) \
qeth_is_ipa_supported(&c->options.ipa4, f)
#define qeth_is_enabled(c, f) \
@@ -217,6 +223,9 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
/* QDIO queue and buffer handling */
/*****************************************************************************/
#define QETH_MAX_QUEUES 4
+#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
+#define QETH_IQD_MCAST_TXQ 0
+#define QETH_IQD_MIN_UCAST_TXQ 1
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
@@ -365,34 +374,6 @@ enum qeth_header_ids {
#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
-enum qeth_qdio_buffer_states {
- /*
- * inbound: read out by driver; owned by hardware in order to be filled
- * outbound: owned by driver in order to be filled
- */
- QETH_QDIO_BUF_EMPTY,
- /*
- * inbound: filled by hardware; owned by driver in order to be read out
- * outbound: filled by driver; owned by hardware in order to be sent
- */
- QETH_QDIO_BUF_PRIMED,
- /*
- * inbound: not applicable
- * outbound: identified to be pending in TPQ
- */
- QETH_QDIO_BUF_PENDING,
- /*
- * inbound: not applicable
- * outbound: found in completion queue
- */
- QETH_QDIO_BUF_IN_CQ,
- /*
- * inbound: not applicable
- * outbound: handled via transfer pending / completion queue
- */
- QETH_QDIO_BUF_HANDLED_DELAYED,
-};
-
enum qeth_qdio_info_states {
QETH_QDIO_UNINITIALIZED,
QETH_QDIO_ALLOCATED,
@@ -424,6 +405,19 @@ struct qeth_qdio_q {
int next_buf_to_init;
};
+enum qeth_qdio_out_buffer_state {
+ /* Owned by driver, in order to be filled. */
+ QETH_QDIO_BUF_EMPTY,
+ /* Filled by driver; owned by hardware in order to be sent. */
+ QETH_QDIO_BUF_PRIMED,
+ /* Identified to be pending in TPQ. */
+ QETH_QDIO_BUF_PENDING,
+ /* Found in completion queue. */
+ QETH_QDIO_BUF_IN_CQ,
+ /* Handled via transfer pending / completion queue. */
+ QETH_QDIO_BUF_HANDLED_DELAYED,
+};
+
struct qeth_qdio_out_buffer {
struct qdio_buffer *buffer;
atomic_t state;
@@ -462,7 +456,6 @@ struct qeth_card_stats {
u64 rx_errors;
u64 rx_dropped;
u64 rx_multicast;
- u64 tx_errors;
};
struct qeth_out_q_stats {
@@ -477,6 +470,7 @@ struct qeth_out_q_stats {
u64 skbs_linearized_fail;
u64 tso_bytes;
u64 packing_mode_switch;
+ u64 stopped;
/* rtnl_link_stats64 */
u64 tx_packets;
@@ -490,14 +484,12 @@ struct qeth_qdio_out_q {
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
- int queue_no;
+ u8 next_buf_to_fill;
+ u8 max_elements;
+ u8 queue_no;
+ u8 do_pack;
struct qeth_card *card;
atomic_t state;
- int do_pack;
- /*
- * index of buffer to be filled by driver; state EMPTY or PACKING
- */
- int next_buf_to_fill;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -507,6 +499,11 @@ struct qeth_qdio_out_q {
atomic_t set_pci_flags_count;
};
+static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+{
+ return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+}
+
struct qeth_qdio_info {
atomic_t state;
/* input */
@@ -538,7 +535,6 @@ struct qeth_qdio_info {
enum qeth_channel_states {
CH_STATE_UP,
CH_STATE_DOWN,
- CH_STATE_ACTIVATING,
CH_STATE_HALTED,
CH_STATE_STOPPED,
CH_STATE_RCD,
@@ -585,7 +581,10 @@ struct qeth_cmd_buffer {
enum qeth_cmd_buffer_state state;
struct qeth_channel *channel;
struct qeth_reply *reply;
+ long timeout;
unsigned char *data;
+ void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
+ unsigned int length);
void (*callback)(struct qeth_card *card, struct qeth_channel *channel,
struct qeth_cmd_buffer *iob);
};
@@ -610,6 +609,11 @@ struct qeth_channel {
int io_buf_no;
};
+static inline bool qeth_trylock_channel(struct qeth_channel *channel)
+{
+ return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0;
+}
+
/**
* OSA card related definitions
*/
@@ -631,17 +635,15 @@ struct qeth_seqno {
__u32 pdu_hdr;
__u32 pdu_hdr_ack;
__u16 ipa;
- __u32 pkt_seqno;
};
struct qeth_reply {
struct list_head list;
- wait_queue_head_t wait_q;
+ struct completion received;
int (*callback)(struct qeth_card *, struct qeth_reply *,
unsigned long);
u32 seqno;
unsigned long offset;
- atomic_t received;
int rc;
void *param;
refcount_t refcnt;
@@ -663,7 +665,7 @@ struct qeth_card_info {
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
u8 open_when_online:1;
- int guestlan;
+ u8 is_vm_nic:1;
int mac_bits;
enum qeth_card_types type;
enum qeth_link_types link_type;
@@ -774,18 +776,19 @@ struct qeth_card {
struct qeth_card_options options;
struct workqueue_struct *event_wq;
+ struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
- spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
DECLARE_HASHTABLE(mac_htable, 4);
DECLARE_HASHTABLE(ip_htable, 4);
+ struct mutex ip_lock;
DECLARE_HASHTABLE(ip_mc_htable, 4);
+ struct work_struct rx_mode_work;
struct work_struct kernel_thread_starter;
spinlock_t thread_mask_lock;
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- spinlock_t ip_lock;
struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
@@ -827,6 +830,15 @@ static inline bool qeth_netdev_is_registered(struct net_device *dev)
return dev->netdev_ops != NULL;
}
+static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
+{
+ if (txq == QETH_IQD_MCAST_TXQ)
+ return dev->num_tx_queues - 1;
+ if (txq == dev->num_tx_queues - 1)
+ return QETH_IQD_MCAST_TXQ;
+ return txq;
+}
+
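
qeth_iqd_translate_txq() reconciles two numbering schemes on HiperSockets (IQD) devices: the stack sees the dedicated multicast TX queue as the last queue of the netdevice, while the device-side queue array keeps it at index QETH_IQD_MCAST_TXQ; all other (unicast) queues map 1:1. A worked example, assuming four stack queues and QETH_IQD_MCAST_TXQ == 0 (the constant itself is defined elsewhere in this header):

    #include <linux/types.h>

    /* Illustration only: same shape as the helper, with the constant folded in. */
    static u16 demo_translate(u16 num_tx_queues, u16 txq)
    {
    	if (txq == 0)				/* mcast queue ... */
    		return num_tx_queues - 1;	/* ... shown to the stack as the last queue */
    	if (txq == num_tx_queues - 1)
    		return 0;
    	return txq;				/* unicast queues map 1:1 */
    }

    /*
     * demo_translate(4, 0) == 3   demo_translate(4, 3) == 0
     * demo_translate(4, 1) == 1   demo_translate(4, 2) == 2
     *
     * The mapping is its own inverse, so the one helper serves both the
     * stack-to-device and the device-to-stack direction.
     */
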
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
@@ -869,6 +881,16 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
}
}
+static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
+{
+ u8 *addr = eth_hdr(skb)->h_dest;
+
+ if (is_multicast_ether_addr(addr))
+ return is_broadcast_ether_addr(addr) ? RTN_BROADCAST :
+ RTN_MULTICAST;
+ return RTN_UNICAST;
+}
+
static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb,
u8 flags)
{
@@ -922,18 +944,7 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
data, QETH_PROT_IPV6);
}
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv);
-static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card,
- struct sk_buff *skb,
- int ipv, int cast_type)
-{
- if (IS_IQD(card) && cast_type != RTN_UNICAST)
- return card->qdio.out_qs[card->qdio.no_out_queues - 1];
- if (!card->qdio.do_prio_queueing)
- return card->qdio.out_qs[card->qdio.default_out_queue];
- return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)];
-}
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
extern struct qeth_discipline qeth_l2_discipline;
extern struct qeth_discipline qeth_l3_discipline;
@@ -979,12 +990,10 @@ void qeth_clear_ipacmd_list(struct qeth_card *);
int qeth_qdio_clear_card(struct qeth_card *, int);
void qeth_clear_working_pool_list(struct qeth_card *);
void qeth_clear_cmd_buffers(struct qeth_channel *);
-void qeth_clear_qdio_buffers(struct qeth_card *);
+void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *);
-void qeth_prepare_control_data(struct qeth_card *, int,
- struct qeth_cmd_buffer *);
void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length);
@@ -1016,6 +1025,8 @@ netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ u8 cast_type, struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
int qeth_stop(struct net_device *dev);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 44bd6f04c145..009f2c0ec504 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -61,13 +61,13 @@ static struct kmem_cache *qeth_qdio_outbuf_cache;
static struct device *qeth_core_root_dev;
static struct lock_class_key qdio_out_skb_queue_key;
-static void qeth_send_control_data_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob);
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob);
static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *);
static void qeth_free_buffer_pool(struct qeth_card *);
static int qeth_qdio_establish(struct qeth_card *);
-static void qeth_free_qdio_buffers(struct qeth_card *);
+static void qeth_free_qdio_queues(struct qeth_card *card);
static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
@@ -85,7 +85,7 @@ static void qeth_close_dev_handler(struct work_struct *work)
static const char *qeth_get_cardname(struct qeth_card *card)
{
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return " Virtual NIC QDIO";
@@ -120,7 +120,7 @@ static const char *qeth_get_cardname(struct qeth_card *card)
/* max length to be returned: 14 */
const char *qeth_get_cardname_short(struct qeth_card *card)
{
- if (card->info.guestlan) {
+ if (IS_VM_NIC(card)) {
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
return "Virt.NIC QDIO";
@@ -511,7 +511,9 @@ static int __qeth_issue_next_read(struct qeth_card *card)
CARD_DEVID(card));
return -ENOMEM;
}
+
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
+ iob->callback = qeth_issue_next_read_cb;
QETH_CARD_TEXT(card, 6, "noirqpnd");
rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
@@ -542,11 +544,10 @@ static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
{
struct qeth_reply *reply;
- reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
+ reply = kzalloc(sizeof(*reply), GFP_KERNEL);
if (reply) {
refcount_set(&reply->refcnt, 1);
- atomic_set(&reply->received, 0);
- init_waitqueue_head(&reply->wait_q);
+ init_completion(&reply->received);
}
return reply;
}
@@ -576,10 +577,10 @@ static void qeth_dequeue_reply(struct qeth_card *card, struct qeth_reply *reply)
spin_unlock_irq(&card->lock);
}
-static void qeth_notify_reply(struct qeth_reply *reply)
+static void qeth_notify_reply(struct qeth_reply *reply, int reason)
{
- atomic_inc(&reply->received);
- wake_up(&reply->wait_q);
+ reply->rc = reason;
+ complete(&reply->received);
}
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
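
Switching struct qeth_reply from the waitqueue/atomic pair to a struct completion lets qeth_notify_reply() publish the result code and wake the waiter in one step; the sender side (reworked further down in qeth_send_control_data()) simply sleeps on the completion with a timeout. A condensed sketch of that handshake using the kernel's completion API (demo_* types are illustrative, error handling trimmed):

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct demo_reply {
    	struct completion received;
    	int rc;
    };

    static void demo_reply_init(struct demo_reply *r)
    {
    	init_completion(&r->received);
    	r->rc = 0;
    }

    static void demo_notify(struct demo_reply *r, int reason)
    {
    	r->rc = reason;			/* publish the result ... */
    	complete(&r->received);		/* ... then wake the waiter */
    }

    static int demo_wait(struct demo_reply *r, unsigned long timeout)
    {
    	long t = wait_for_completion_interruptible_timeout(&r->received, timeout);

    	if (t == 0)
    		return -ETIME;		/* timed out */
    	if (t < 0)
    		return -EINTR;		/* interrupted by a signal */
    	return r->rc;			/* whatever demo_notify() stored */
    }
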
@@ -664,10 +665,8 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "clipalst");
spin_lock_irqsave(&card->lock, flags);
- list_for_each_entry(reply, &card->cmd_waiter_list, list) {
- reply->rc = -EIO;
- qeth_notify_reply(reply);
- }
+ list_for_each_entry(reply, &card->cmd_waiter_list, list)
+ qeth_notify_reply(reply, -EIO);
spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
@@ -675,9 +674,6 @@ EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
{
- if (!buffer)
- return 0;
-
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
@@ -704,6 +700,7 @@ static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel)
do {
if (channel->iob[index].state == BUF_STATE_FREE) {
channel->iob[index].state = BUF_STATE_LOCKED;
+ channel->iob[index].timeout = QETH_TIMEOUT;
channel->io_buf_no = (channel->io_buf_no + 1) %
QETH_CMD_BUFFER_NO;
memset(channel->iob[index].data, 0, QETH_BUFSIZE);
@@ -722,7 +719,7 @@ void qeth_release_buffer(struct qeth_channel *channel,
spin_lock_irqsave(&channel->iob_lock, flags);
iob->state = BUF_STATE_FREE;
- iob->callback = qeth_send_control_data_cb;
+ iob->callback = NULL;
if (iob->reply) {
qeth_put_reply(iob->reply);
iob->reply = NULL;
@@ -743,10 +740,8 @@ static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
struct qeth_reply *reply = iob->reply;
- if (reply) {
- reply->rc = rc;
- qeth_notify_reply(reply);
- }
+ if (reply)
+ qeth_notify_reply(reply, rc);
qeth_release_buffer(iob->channel, iob);
}
@@ -780,9 +775,9 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel)
}
EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers);
-static void qeth_send_control_data_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
+static void qeth_issue_next_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
{
struct qeth_ipa_cmd *cmd = NULL;
struct qeth_reply *reply = NULL;
@@ -846,11 +841,8 @@ static void qeth_send_control_data_cb(struct qeth_card *card,
}
}
- if (rc <= 0) {
- reply->rc = rc;
- qeth_notify_reply(reply);
- }
-
+ if (rc <= 0)
+ qeth_notify_reply(reply, rc);
qeth_put_reply(reply);
out:
@@ -1173,20 +1165,19 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_release_skbs(buf);
- for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
+ for (i = 0; i < queue->max_elements; ++i) {
if (buf->buffer->element[i].addr && buf->is_header[i])
kmem_cache_free(qeth_core_header_cache,
buf->buffer->element[i].addr);
buf->is_header[i] = 0;
}
- qeth_scrub_qdio_buffer(buf->buffer,
- QETH_MAX_BUFFER_ELEMENTS(queue->card));
+ qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
buf->next_element_to_fill = 0;
atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
-static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
+static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
int j;
@@ -1202,19 +1193,18 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
}
}
-void qeth_clear_qdio_buffers(struct qeth_card *card)
+void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
QETH_CARD_TEXT(card, 2, "clearqdbf");
/* clear outbound buffers to free skbs */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- if (card->qdio.out_qs[i]) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 0);
- }
+ if (card->qdio.out_qs[i])
+ qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
-EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers);
+EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
static void qeth_free_buffer_pool(struct qeth_card *card)
{
@@ -1273,7 +1263,6 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
break;
channel->iob[cnt].state = BUF_STATE_FREE;
channel->iob[cnt].channel = channel;
- channel->iob[cnt].callback = qeth_send_control_data_cb;
}
if (cnt < QETH_CMD_BUFFER_NO) {
qeth_clean_channel(channel);
@@ -1285,30 +1274,28 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
return 0;
}
-static void qeth_set_single_write_queues(struct qeth_card *card)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 4))
- qeth_free_qdio_buffers(card);
+ unsigned int count = single ? 1 : card->dev->num_tx_queues;
- card->qdio.no_out_queues = 1;
- if (card->qdio.default_out_queue != 0)
- dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+ rtnl_lock();
+ netif_set_real_num_tx_queues(card->dev, count);
+ rtnl_unlock();
- card->qdio.default_out_queue = 0;
-}
+ if (card->qdio.no_out_queues == count)
+ return;
-static void qeth_set_multiple_write_queues(struct qeth_card *card)
-{
- if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) &&
- (card->qdio.no_out_queues == 1)) {
- qeth_free_qdio_buffers(card);
- card->qdio.default_out_queue = 2;
- }
- card->qdio.no_out_queues = 4;
+ if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
+ qeth_free_qdio_queues(card);
+
+ if (count == 1)
+ dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
+
+ card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
+ card->qdio.no_out_queues = count;
}
-static void qeth_update_from_chp_desc(struct qeth_card *card)
+static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
@@ -1318,21 +1305,18 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
ccwdev = card->data.ccwdev;
chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
if (!chp_dsc)
- goto out;
+ return -ENOMEM;
card->info.func_level = 0x4100 + chp_dsc->desc;
- if (card->info.type == QETH_CARD_TYPE_IQD)
- goto out;
- /* CHPP field bit 6 == 1 -> single queue */
- if ((chp_dsc->chpp & 0x02) == 0x02)
- qeth_set_single_write_queues(card);
- else
- qeth_set_multiple_write_queues(card);
-out:
+ if (IS_OSD(card) || IS_OSX(card))
+ /* CHPP field bit 6 == 1 -> single queue */
+ qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+
kfree(chp_dsc);
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
+ return 0;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1341,12 +1325,11 @@ static void qeth_init_qdio_info(struct qeth_card *card)
atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
- card->qdio.no_out_queues = QETH_MAX_QUEUES;
/* inbound */
card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
else
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
@@ -1409,9 +1392,7 @@ static void qeth_setup_card(struct qeth_card *card)
card->info.type = CARD_RDEV(card)->id.driver_info;
card->state = CARD_STATE_DOWN;
- spin_lock_init(&card->mclock);
spin_lock_init(&card->lock);
- spin_lock_init(&card->ip_lock);
spin_lock_init(&card->thread_mask_lock);
mutex_init(&card->conf_mutex);
mutex_init(&card->discipline_mutex);
@@ -1451,7 +1432,8 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];
- card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
+ card->event_wq = alloc_ordered_workqueue("%s_event", 0,
+ dev_name(&gdev->dev));
if (!card->event_wq)
goto out_wq;
if (qeth_setup_channel(&card->read, true))
@@ -1571,7 +1553,7 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
QETH_QDIO_CLEANING)) {
case QETH_QDIO_ESTABLISHED:
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
rc = qdio_shutdown(CARD_DDEV(card),
QDIO_FLAG_CLEANUP_USING_HALT);
else
@@ -1644,8 +1626,8 @@ static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
card->info.chpid = prcd[30];
card->info.unit_addr2 = prcd[31];
card->info.cula = prcd[63];
- card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
- (prcd[0x11] == _ascebc['M']));
+ card->info.is_vm_nic = ((prcd[0x10] == _ascebc['V']) &&
+ (prcd[0x11] == _ascebc['M']));
}
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
@@ -1709,13 +1691,11 @@ static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
{
enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
- if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSN)
+ if (IS_OSM(card) || IS_OSN(card))
disc = QETH_DISCIPLINE_LAYER2;
- else if (card->info.guestlan)
- disc = (card->info.type == QETH_CARD_TYPE_IQD) ?
- QETH_DISCIPLINE_LAYER3 :
- qeth_vm_detect_layer(card);
+ else if (IS_VM_NIC(card))
+ disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+ qeth_vm_detect_layer(card);
switch (disc) {
case QETH_DISCIPLINE_LAYER2:
@@ -1771,121 +1751,16 @@ static void qeth_init_func_level(struct qeth_card *card)
}
}
-static int qeth_idx_activate_get_answer(struct qeth_card *card,
- struct qeth_channel *channel,
- void (*reply_cb)(struct qeth_card *,
- struct qeth_channel *,
- struct qeth_cmd_buffer *))
-{
- struct qeth_cmd_buffer *iob;
- int rc;
-
- QETH_DBF_TEXT(SETUP, 2, "idxanswr");
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
- iob->callback = reply_cb;
- qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
-
- if (rc) {
- QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
- wake_up(&card->wait_q);
- return rc;
- }
- rc = wait_event_interruptible_timeout(card->wait_q,
- channel->state == CH_STATE_UP, QETH_TIMEOUT);
- if (rc == -ERESTARTSYS)
- return rc;
- if (channel->state != CH_STATE_UP) {
- rc = -ETIME;
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- } else
- rc = 0;
- return rc;
-}
-
-static int qeth_idx_activate_channel(struct qeth_card *card,
- struct qeth_channel *channel,
- void (*reply_cb)(struct qeth_card *,
- struct qeth_channel *,
- struct qeth_cmd_buffer *))
+static void qeth_idx_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
{
- struct qeth_cmd_buffer *iob;
- __u16 temp;
- __u8 tmp;
- int rc;
- struct ccw_dev_id temp_devid;
-
- QETH_DBF_TEXT(SETUP, 2, "idxactch");
+ qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, length, iob->data);
- iob = qeth_get_buffer(channel);
- if (!iob)
- return -ENOMEM;
- iob->callback = reply_cb;
- qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE,
- iob->data);
- if (channel == &card->write) {
- memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
+ memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
+ QETH_SEQ_NO_LENGTH);
+ if (iob->channel == &card->write)
card->seqno.trans_hdr++;
- } else {
- memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- }
- tmp = ((u8)card->dev->dev_port) | 0x80;
- memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1);
- memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
- &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
- memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
- &card->info.func_level, sizeof(__u16));
- ccw_device_get_id(CARD_DDEV(card), &temp_devid);
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2);
- temp = (card->info.cula << 8) + card->info.unit_addr2;
- memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2);
-
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- QETH_DBF_TEXT(SETUP, 6, "noirqpnd");
- spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
- rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_TIMEOUT);
- spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
-
- if (rc) {
- QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n",
- rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- atomic_set(&channel->irq_pending, 0);
- qeth_release_buffer(channel, iob);
- wake_up(&card->wait_q);
- return rc;
- }
- rc = wait_event_interruptible_timeout(card->wait_q,
- channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT);
- if (rc == -ERESTARTSYS)
- return rc;
- if (channel->state != CH_STATE_ACTIVATING) {
- dev_warn(&channel->ccwdev->dev, "The qeth device driver"
- " failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
- CCW_DEVID(channel->ccwdev));
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
- return -ETIME;
- }
- return qeth_idx_activate_get_answer(card, channel, reply_cb);
}
static int qeth_peer_func_level(int level)
@@ -1897,112 +1772,21 @@ static int qeth_peer_func_level(int level)
return level;
}
-static void qeth_idx_write_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- __u16 temp;
-
- QETH_DBF_TEXT(SETUP , 2, "idxwrcb");
-
- if (channel->state == CH_STATE_DOWN) {
- channel->state = CH_STATE_ACTIVATING;
- goto out;
- }
-
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL)
- dev_err(&channel->ccwdev->dev,
- "The adapter is used exclusively by another "
- "host\n");
- else
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
- CCW_DEVID(channel->ccwdev));
- goto out;
- }
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
- if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
- CCW_DEVID(channel->ccwdev),
- card->info.func_level, temp);
- goto out;
- }
- channel->state = CH_STATE_UP;
-out:
- qeth_release_buffer(channel, iob);
-}
-
-static void qeth_idx_read_cb(struct qeth_card *card,
- struct qeth_channel *channel,
- struct qeth_cmd_buffer *iob)
-{
- __u16 temp;
-
- QETH_DBF_TEXT(SETUP , 2, "idxrdcb");
- if (channel->state == CH_STATE_DOWN) {
- channel->state = CH_STATE_ACTIVATING;
- goto out;
- }
-
- if (qeth_check_idx_response(card, iob->data))
- goto out;
-
- if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) {
- switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
- case QETH_IDX_ACT_ERR_EXCL:
- dev_err(&channel->ccwdev->dev,
- "The adapter is used exclusively by another "
- "host\n");
- break;
- case QETH_IDX_ACT_ERR_AUTH:
- case QETH_IDX_ACT_ERR_AUTH_USER:
- dev_err(&channel->ccwdev->dev,
- "Setting the device online failed because of "
- "insufficient authorization\n");
- break;
- default:
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
- CCW_DEVID(channel->ccwdev));
- }
- QETH_CARD_TEXT_(card, 2, "idxread%c",
- QETH_IDX_ACT_CAUSE_CODE(iob->data));
- goto out;
- }
-
- memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
- if (temp != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
- CCW_DEVID(channel->ccwdev),
- card->info.func_level, temp);
- goto out;
- }
- memcpy(&card->token.issuer_rm_r,
- QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
- QETH_MPC_TOKEN_LENGTH);
- memcpy(&card->info.mcl_level[0],
- QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
- channel->state = CH_STATE_UP;
-out:
- qeth_release_buffer(channel, iob);
-}
-
-void qeth_prepare_control_data(struct qeth_card *card, int len,
- struct qeth_cmd_buffer *iob)
+static void qeth_mpc_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
{
- qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data);
- iob->callback = qeth_release_buffer_cb;
+ qeth_idx_finalize_cmd(card, iob, length);
- memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data),
- &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH);
- card->seqno.trans_hdr++;
memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
&card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
card->seqno.pdu_hdr++;
memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
&card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
- QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
+
+ iob->reply->seqno = QETH_IDX_COMMAND_SEQNO;
+ iob->callback = qeth_release_buffer_cb;
}
-EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
/**
* qeth_send_control_data() - send control command to the card
@@ -2035,17 +1819,12 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
void *reply_param)
{
struct qeth_channel *channel = iob->channel;
+ long timeout = iob->timeout;
int rc;
struct qeth_reply *reply = NULL;
- unsigned long timeout, event_timeout;
- struct qeth_ipa_cmd *cmd = NULL;
QETH_CARD_TEXT(card, 2, "sendctl");
- if (card->read_or_write_problem) {
- qeth_release_buffer(channel, iob);
- return -EIO;
- }
reply = qeth_alloc_reply(card);
if (!reply) {
qeth_release_buffer(channel, iob);
@@ -2058,27 +1837,24 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
qeth_get_reply(reply);
iob->reply = reply;
- while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ;
-
- if (IS_IPA(iob->data)) {
- cmd = __ipa_cmd(iob);
- cmd->hdr.seqno = card->seqno.ipa++;
- reply->seqno = cmd->hdr.seqno;
- event_timeout = QETH_IPA_TIMEOUT;
- } else {
- reply->seqno = QETH_IDX_COMMAND_SEQNO;
- event_timeout = QETH_TIMEOUT;
+ timeout = wait_event_interruptible_timeout(card->wait_q,
+ qeth_trylock_channel(channel),
+ timeout);
+ if (timeout <= 0) {
+ qeth_put_reply(reply);
+ qeth_release_buffer(channel, iob);
+ return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
}
- qeth_prepare_control_data(card, len, iob);
- qeth_enqueue_reply(card, reply);
+ iob->finalize(card, iob, len);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
- timeout = jiffies + event_timeout;
+ qeth_enqueue_reply(card, reply);
QETH_CARD_TEXT(card, 6, "noirqpnd");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, event_timeout);
+ (addr_t) iob, 0, 0, timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -2092,30 +1868,211 @@ static int qeth_send_control_data(struct qeth_card *card, int len,
return rc;
}
- /* we have only one long running ipassist, since we can ensure
- process context of this command we can sleep */
- if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
- cmd->hdr.prot_version == QETH_PROT_IPV4) {
- if (!wait_event_timeout(reply->wait_q,
- atomic_read(&reply->received), event_timeout))
- goto time_err;
- } else {
- while (!atomic_read(&reply->received)) {
- if (time_after(jiffies, timeout))
- goto time_err;
- cpu_relax();
- }
- }
+ timeout = wait_for_completion_interruptible_timeout(&reply->received,
+ timeout);
+ if (timeout <= 0)
+ rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
qeth_dequeue_reply(card, reply);
- rc = reply->rc;
+ if (!rc)
+ rc = reply->rc;
qeth_put_reply(reply);
return rc;
+}
-time_err:
- qeth_dequeue_reply(card, reply);
- qeth_put_reply(reply);
- return -ETIME;
+static int qeth_idx_check_activate_response(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ int rc;
+
+ rc = qeth_check_idx_response(card, iob->data);
+ if (rc)
+ return rc;
+
+ if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
+ return 0;
+
+ /* negative reply: */
+ QETH_DBF_TEXT_(SETUP, 2, "idxneg%c",
+ QETH_IDX_ACT_CAUSE_CODE(iob->data));
+
+ switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
+ case QETH_IDX_ACT_ERR_EXCL:
+ dev_err(&channel->ccwdev->dev,
+ "The adapter is used exclusively by another host\n");
+ return -EBUSY;
+ case QETH_IDX_ACT_ERR_AUTH:
+ case QETH_IDX_ACT_ERR_AUTH_USER:
+ dev_err(&channel->ccwdev->dev,
+ "Setting the device online failed because of insufficient authorization\n");
+ return -EPERM;
+ default:
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
+ return -EIO;
+ }
+}
+
+static void qeth_idx_query_read_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 peer_level;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxrdcb");
+
+ rc = qeth_idx_check_activate_response(card, channel, iob);
+ if (rc)
+ goto out;
+
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+ if (peer_level != qeth_peer_func_level(card->info.func_level)) {
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, peer_level);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&card->token.issuer_rm_r,
+ QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+ QETH_MPC_TOKEN_LENGTH);
+ memcpy(&card->info.mcl_level[0],
+ QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
+
+out:
+ qeth_notify_reply(iob->reply, rc);
+ qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_query_write_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 peer_level;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxwrcb");
+
+ rc = qeth_idx_check_activate_response(card, channel, iob);
+ if (rc)
+ goto out;
+
+ memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
+ if ((peer_level & ~0x0100) !=
+ qeth_peer_func_level(card->info.func_level)) {
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, peer_level);
+ rc = -EINVAL;
+ }
+
+out:
+ qeth_notify_reply(iob->reply, rc);
+ qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_finalize_query_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
+{
+ qeth_setup_ccw(iob->channel->ccw, CCW_CMD_READ, length, iob->data);
+}
+
+static void qeth_idx_activate_cb(struct qeth_card *card,
+ struct qeth_channel *channel,
+ struct qeth_cmd_buffer *iob)
+{
+ qeth_notify_reply(iob->reply, 0);
+ qeth_release_buffer(channel, iob);
+}
+
+static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob)
+{
+ u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
+ u8 port = ((u8)card->dev->dev_port) | 0x80;
+ struct ccw_dev_id dev_id;
+
+ ccw_device_get_id(CARD_DDEV(card), &dev_id);
+ iob->finalize = qeth_idx_finalize_cmd;
+ iob->callback = qeth_idx_activate_cb;
+
+ memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
+ memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
+ &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
+ memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
+ &card->info.func_level, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
+}
+
+static int qeth_idx_activate_read_channel(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->read;
+ struct qeth_cmd_buffer *iob;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxread");
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
+ qeth_idx_setup_activate_cmd(card, iob);
+
+ rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ iob->finalize = qeth_idx_finalize_query_cmd;
+ iob->callback = qeth_idx_query_read_cb;
+ rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ channel->state = CH_STATE_UP;
+ return 0;
+}
+
+static int qeth_idx_activate_write_channel(struct qeth_card *card)
+{
+ struct qeth_channel *channel = &card->write;
+ struct qeth_cmd_buffer *iob;
+ int rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "idxwrite");
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
+ qeth_idx_setup_activate_cmd(card, iob);
+
+ rc = qeth_send_control_data(card, IDX_ACTIVATE_SIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
+
+ iob->finalize = qeth_idx_finalize_query_cmd;
+ iob->callback = qeth_idx_query_write_cb;
+ rc = qeth_send_control_data(card, QETH_BUFSIZE, iob, NULL, NULL);
+ if (rc)
+ return rc;
+
+ channel->state = CH_STATE_UP;
+ return 0;
}
static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
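
The reworked qeth_send_control_data() and the new IDX activate helpers above share one four-step shape: take the channel via qeth_trylock_channel() (sleeping up to the command's own iob->timeout), let iob->finalize() stamp sequence numbers and build the CCW, start the channel I/O, then sleep on the reply completion until qeth_notify_reply() fires from the IRQ path. IDX activation thereby becomes two such round trips per channel (a WRITE carrying IDX_ACTIVATE_* plus a READ collecting the response) instead of a hand-rolled state machine. A compressed outline of that lifecycle (demo_* types are illustrative; the CCW start and the reply-list bookkeeping are elided):

    #include <linux/atomic.h>
    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/wait.h>

    struct demo_cmd {
    	long timeout;			/* per command, e.g. QETH_TIMEOUT vs. QETH_IPA_TIMEOUT */
    	void (*finalize)(struct demo_cmd *cmd, unsigned int len);
    	struct completion done;
    	int rc;
    };

    static int demo_send_and_wait(wait_queue_head_t *wait_q, atomic_t *irq_pending,
    			      struct demo_cmd *cmd, unsigned int len)
    {
    	long t;

    	/* 1. serialize against other users of this CCW channel */
    	t = wait_event_interruptible_timeout(*wait_q,
    					     atomic_cmpxchg(irq_pending, 0, 1) == 0,
    					     cmd->timeout);
    	if (t <= 0)
    		return (t == -ERESTARTSYS) ? -EINTR : -ETIME;

    	/* 2. command-specific setup: seqnos, CCW, callbacks */
    	cmd->finalize(cmd, len);

    	/* 3. ccw_device_start_timeout() would go here; the IRQ path later
    	 *    stores the result and calls complete(&cmd->done)
    	 */

    	/* 4. wait for the response, reusing whatever time is left */
    	t = wait_for_completion_interruptible_timeout(&cmd->done, t);
    	if (t <= 0)
    		return (t == -ERESTARTSYS) ? -EINTR : -ETIME;
    	return cmd->rc;
    }
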
@@ -2140,7 +2097,9 @@ static int qeth_cm_enable(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "cmenable");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE);
+
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
@@ -2173,7 +2132,9 @@ static int qeth_cm_setup(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "cmsetup");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE);
+
memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
&card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
@@ -2206,7 +2167,7 @@ static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
/* adjust RX buffer size to new max MTU: */
card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
if (dev->max_mtu && dev->max_mtu != max_mtu)
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
} else {
if (dev->mtu)
new_mtu = dev->mtu;
@@ -2253,7 +2214,7 @@ static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
memcpy(&card->token.ulp_filter_r,
QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
QETH_MPC_TOKEN_LENGTH);
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
mtu = qeth_get_mtu_outof_framesize(framesize);
} else {
@@ -2290,6 +2251,7 @@ static int qeth_ulp_enable(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "ulpenabl");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE);
*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
@@ -2336,6 +2298,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "ulpsetup");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE);
memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
@@ -2377,12 +2340,12 @@ static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
if (!q)
return;
- qeth_clear_outq_buffers(q, 1);
+ qeth_drain_output_queue(q, true);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
-static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
+static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
{
struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
@@ -2396,7 +2359,7 @@ static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
return q;
}
-static int qeth_alloc_qdio_buffers(struct qeth_card *card)
+static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
int i, j;
@@ -2417,11 +2380,12 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
/* outbound */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
+ card->qdio.out_qs[i] = qeth_alloc_output_queue();
if (!card->qdio.out_qs[i])
goto out_freeoutq;
QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *));
+ card->qdio.out_qs[i]->card = card;
card->qdio.out_qs[i]->queue_no = i;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2458,7 +2422,7 @@ out_nomem:
return -ENOMEM;
}
-static void qeth_free_qdio_buffers(struct qeth_card *card)
+static void qeth_free_qdio_queues(struct qeth_card *card)
{
int i, j;
@@ -2523,6 +2487,7 @@ static int qeth_dm_act(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "dmact");
iob = qeth_wait_for_buffer(&card->write);
+ iob->finalize = qeth_mpc_finalize_cmd;
memcpy(iob->data, DM_ACT, DM_ACT_SIZE);
memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
@@ -2564,7 +2529,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_qdio;
}
- rc = qeth_alloc_qdio_buffers(card);
+ rc = qeth_alloc_qdio_queues(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_qdio;
@@ -2572,7 +2537,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
rc = qeth_qdio_establish(card);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
goto out_qdio;
}
rc = qeth_qdio_activate(card);
@@ -2588,7 +2553,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
out_qdio:
- qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qeth_qdio_clear_card(card, !IS_IQD(card));
qdio_free(CARD_DDEV(card));
return rc;
}
@@ -2611,8 +2576,7 @@ void qeth_print_status_message(struct qeth_card *card)
}
/* fallthrough */
case QETH_CARD_TYPE_IQD:
- if ((card->info.guestlan) ||
- (card->info.mcl_level[0] & 0x80)) {
+ if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
card->info.mcl_level[0] = (char) _ebcasc[(__u8)
card->info.mcl_level[0]];
card->info.mcl_level[1] = (char) _ebcasc[(__u8)
@@ -2733,7 +2697,7 @@ static int qeth_init_input_buffer(struct qeth_card *card,
int qeth_init_qdio_queues(struct qeth_card *card)
{
- int i, j;
+ unsigned int i;
int rc;
QETH_DBF_TEXT(SETUP, 2, "initqdqs");
@@ -2762,19 +2726,15 @@ int qeth_init_qdio_queues(struct qeth_card *card)
/* outbound queue */
for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
- QDIO_MAX_BUFFERS_PER_Q);
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
- qeth_clear_output_buffer(card->qdio.out_qs[i],
- card->qdio.out_qs[i]->bufs[j]);
- }
- card->qdio.out_qs[i]->card = card;
- card->qdio.out_qs[i]->next_buf_to_fill = 0;
- card->qdio.out_qs[i]->do_pack = 0;
- atomic_set(&card->qdio.out_qs[i]->used_buffers, 0);
- atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0);
- atomic_set(&card->qdio.out_qs[i]->state,
- QETH_OUT_Q_UNLOCKED);
+ struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
+
+ qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
+ queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ queue->next_buf_to_fill = 0;
+ queue->do_pack = 0;
+ atomic_set(&queue->used_buffers, 0);
+ atomic_set(&queue->set_pci_flags_count, 0);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
return 0;
}
@@ -2805,12 +2765,26 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
cmd->hdr.prot_version = prot;
}
+static void qeth_ipa_finalize_cmd(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob,
+ unsigned int length)
+{
+ qeth_mpc_finalize_cmd(card, iob, length);
+
+ /* override with IPA-specific values: */
+ __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa;
+ iob->reply->seqno = card->seqno.ipa++;
+}
+
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
u16 cmd_length)
{
u16 total_length = IPA_PDU_HEADER_SIZE + cmd_length;
u8 prot_type = qeth_mpc_select_prot_type(card);
+ iob->finalize = qeth_ipa_finalize_cmd;
+ iob->timeout = QETH_IPA_TIMEOUT;
+
memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
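
The finalize hooks introduced in this patch compose: qeth_mpc_finalize_cmd() calls qeth_idx_finalize_cmd() (WRITE CCW setup plus the transport-header seqno) and adds the PDU header and ack seqnos, and qeth_ipa_finalize_cmd() just above calls qeth_mpc_finalize_cmd() before stamping the IPA seqno into both the command header and the reply. Reduced to its shape (comment bodies only, demo_* names illustrative):

    struct demo_iob;			/* stand-in for struct qeth_cmd_buffer */

    static void demo_idx_finalize(struct demo_iob *iob)
    {
    	/* build the WRITE CCW, copy the transport-header seqno
    	 * (and bump it when sending on the write channel)
    	 */
    }

    static void demo_mpc_finalize(struct demo_iob *iob)
    {
    	demo_idx_finalize(iob);
    	/* additionally stamp the PDU header seqno and its ack seqno */
    }

    static void demo_ipa_finalize(struct demo_iob *iob)
    {
    	demo_mpc_finalize(iob);
    	/* override with IPA-specific values: cmd->hdr.seqno and reply->seqno */
    }
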
@@ -2866,6 +2840,11 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
QETH_CARD_TEXT(card, 4, "sendipa");
+ if (card->read_or_write_problem) {
+ qeth_release_buffer(iob->channel, iob);
+ return -EIO;
+ }
+
if (reply_cb == NULL)
reply_cb = qeth_send_ipa_cmd_cb;
memcpy(&length, QETH_IPA_PDU_LEN_TOTAL(iob->data), 2);
@@ -3251,7 +3230,7 @@ static void qeth_handle_send_error(struct qeth_card *card,
int sbalf15 = buffer->buffer->element[15].sflags;
QETH_CARD_TEXT(card, 6, "hdsnderr");
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
if (sbalf15 == 0) {
qdio_err = 0;
} else {
@@ -3348,7 +3327,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (queue->bufstates)
queue->bufstates[bidx].user = buf;
- if (queue->card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(queue->card))
continue;
if (!queue->do_pack) {
@@ -3378,11 +3357,9 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
}
QETH_TXQ_STAT_ADD(queue, bufs, count);
- netif_trans_update(queue->card->dev);
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
- atomic_add(count, &queue->used_buffers);
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (rc) {
@@ -3422,7 +3399,6 @@ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
* do_send_packet. So, we check if there is a
* packing buffer to be flushed here.
*/
- netif_stop_queue(queue->card->dev);
index = queue->next_buf_to_fill;
q_was_packing = queue->do_pack;
/* queue->do_pack may change */
@@ -3467,7 +3443,7 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
goto out;
}
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
card->options.cq = cq;
rc = 0;
}
@@ -3493,7 +3469,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
if (qdio_err) {
- netif_stop_queue(card->dev);
+ netif_tx_stop_all_queues(card->dev);
qeth_schedule_recovery(card);
return;
}
@@ -3549,12 +3525,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
struct qeth_card *card = (struct qeth_card *) card_ptr;
struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct qeth_qdio_out_buffer *buffer;
+ struct net_device *dev = card->dev;
+ struct netdev_queue *txq;
int i;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_FATAL) {
QETH_CARD_TEXT(card, 2, "achkcond");
- netif_stop_queue(card->dev);
+ netif_tx_stop_all_queues(dev);
qeth_schedule_recovery(card);
return;
}
@@ -3580,7 +3558,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
/* prepare the queue slot for re-use: */
qeth_scrub_qdio_buffer(buffer->buffer,
- QETH_MAX_BUFFER_ELEMENTS(card));
+ queue->max_elements);
if (qeth_init_qdio_out_buf(queue, bidx)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
@@ -3600,33 +3578,32 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
}
atomic_sub(count, &queue->used_buffers);
/* check if we need to do something on this outbound queue */
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
qeth_check_outbound_queue(queue);
- netif_wake_queue(queue->card->dev);
-}
-
-/* We cannot use outbound queue 3 for unicast packets on HiperSockets */
-static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num)
-{
- if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3))
- return 2;
- return queue_num;
+ if (IS_IQD(card))
+ __queue = qeth_iqd_translate_txq(dev, __queue);
+ txq = netdev_get_tx_queue(dev, __queue);
+	/* xmit may have observed the full-condition, but not yet stopped the
+	 * txq; in that case the code below won't trigger. So before returning,
+	 * xmit will re-check the txq's fill level and wake it up if needed.
+	 */
+ if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
+ netif_tx_wake_queue(txq);
}
/**
* Note: Function assumes that we have 4 outbound queues.
*/
-int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
- int ipv)
+int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
{
- __be16 *tci;
+ struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
u8 tos;
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_TOS:
case QETH_PRIO_Q_ING_PREC:
- switch (ipv) {
+ switch (qeth_get_ip_version(skb)) {
case 4:
tos = ipv4_get_dsfield(ip_hdr(skb));
break;
@@ -3637,9 +3614,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
return card->qdio.default_out_queue;
}
if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
- return qeth_cut_iqd_prio(card, ~tos >> 6 & 3);
+ return ~tos >> 6 & 3;
if (tos & IPTOS_MINCOST)
- return qeth_cut_iqd_prio(card, 3);
+ return 3;
if (tos & IPTOS_RELIABILITY)
return 2;
if (tos & IPTOS_THROUGHPUT)
@@ -3650,12 +3627,11 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
case QETH_PRIO_Q_ING_SKB:
if (skb->priority > 5)
return 0;
- return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3);
+ return ~skb->priority >> 1 & 3;
case QETH_PRIO_Q_ING_VLAN:
- tci = &((struct ethhdr *)skb->data)->h_proto;
- if (be16_to_cpu(*tci) == ETH_P_8021Q)
- return qeth_cut_iqd_prio(card,
- ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3);
+ if (veth->h_vlan_proto == htons(ETH_P_8021Q))
+ return ~ntohs(veth->h_vlan_TCI) >>
+ (VLAN_PRIO_SHIFT + 1) & 3;
break;
default:
break;
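
With the IQD special-casing removed, qeth_get_priority_queue() maps a 3-bit priority onto the four OSA TX queues as queue = 3 - (prio >> 1), which is what the "~prio >> 1 & 3" expressions compute; higher priorities land on lower-numbered, higher-precedence queues. A short worked example for the VLAN-PCP and skb->priority branches (values checked by hand, not taken from the patch):

    #include <linux/types.h>

    /* Closed form of the "~prio >> 1 & 3" trick for prio = 0..7. */
    static u8 demo_prio_to_queue(u8 prio)
    {
    	return 3 - (prio >> 1);
    }

    /*
     * prio 0-1 -> queue 3 (lowest precedence)
     * prio 2-3 -> queue 2
     * prio 4-5 -> queue 1
     * prio 6-7 -> queue 0 (highest precedence)
     *
     * The VLAN branch feeds the 3-bit PCP from the tag into this mapping;
     * the skb->priority branch short-circuits anything above 5 to queue 0.
     */
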
@@ -3729,8 +3705,8 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
- const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(queue->card);
const unsigned int contiguous = proto_len ? proto_len : 1;
+ const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
addr_t start, end;
bool push_ok;
@@ -3867,11 +3843,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb,
* from qeth_core_header_cache.
* @offset: when mapping the skb, start at skb->data + offset
* @hd_len: if > 0, build a dedicated header element of this size
+ * @flush: prepare the buffer to be flushed, regardless of its fill level
*/
static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
struct sk_buff *skb, struct qeth_hdr *hdr,
- unsigned int offset, unsigned int hd_len)
+ unsigned int offset, unsigned int hd_len,
+ bool flush)
{
struct qdio_buffer *buffer = buf->buffer;
bool is_first_elem = true;
@@ -3900,8 +3878,8 @@ static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
QETH_TXQ_STAT_INC(queue, skbs_pack);
/* If the buffer still has free elements, keep using it. */
- if (buf->next_element_to_fill <
- QETH_MAX_BUFFER_ELEMENTS(queue->card))
+ if (!flush &&
+ buf->next_element_to_fill < queue->max_elements)
return 0;
}
@@ -3918,15 +3896,31 @@ static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
{
int index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
+ struct netdev_queue *txq;
+ bool stopped = false;
- /*
- * check if buffer is empty to make sure that we do not 'overtake'
- * ourselves and try to fill a buffer that is already primed
+	/* Just a sanity check; the wake/stop logic should ensure that we always
+ * get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
+
+ txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
+
+ if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+		/* If a TX completion happens right _here_ and fails to wake
+ * the txq, then our re-check below will catch the race.
+ */
+ QETH_TXQ_STAT_INC(queue, stopped);
+ netif_tx_stop_queue(txq);
+ stopped = true;
+ }
+
+ qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
qeth_flush_buffers(queue, index, 1);
+
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
return 0;
}
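
The per-txq wake/stop protocol added here replaces the old netif_stop_queue()/netif_wake_queue() calls on the whole device: the xmit path stops only its own txq once used_buffers reaches QDIO_MAX_BUFFERS_PER_Q, and the TX-completion path (in the output handler above) wakes a txq it finds stopped once buffers come back. Because a completion can run between the producer's atomic_inc_return() and its netif_tx_stop_queue(), the producer re-checks the fill level before returning. Both halves, condensed from the hunks in this patch:

    /* xmit side (producer): */
    if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
    	/* a completion racing with this stop may not yet see a stopped txq */
    	netif_tx_stop_queue(txq);
    	stopped = true;
    }
    /* ... fill the buffer and flush it to the device ... */
    if (stopped && !qeth_out_queue_is_full(queue))
    	netif_tx_start_queue(txq);	/* undo a stop the completion never saw */

    /* TX-completion side (consumer), after a batch of buffers is returned: */
    atomic_sub(count, &queue->used_buffers);
    if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
    	netif_tx_wake_queue(txq);
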
@@ -3936,6 +3930,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
int elements_needed)
{
struct qeth_qdio_out_buffer *buffer;
+ struct netdev_queue *txq;
+ bool stopped = false;
int start_index;
int flush_count = 0;
int do_pack = 0;
@@ -3947,21 +3943,24 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
start_index = queue->next_buf_to_fill;
buffer = queue->bufs[queue->next_buf_to_fill];
- /*
- * check if buffer is empty to make sure that we do not 'overtake'
- * ourselves and try to fill a buffer that is already primed
+
+	/* Just a sanity check; the wake/stop logic should ensure that we always
+ * get a free buffer.
*/
if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
return -EBUSY;
}
+
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
/* check if we need to switch packing state of this queue */
qeth_switch_to_packing_if_needed(queue);
if (queue->do_pack) {
do_pack = 1;
/* does packet fit in current buffer? */
- if ((QETH_MAX_BUFFER_ELEMENTS(card) -
- buffer->next_element_to_fill) < elements_needed) {
+ if (buffer->next_element_to_fill + elements_needed >
+ queue->max_elements) {
/* ... no -> set state PRIMED */
atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
flush_count++;
@@ -3969,8 +3968,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
(queue->next_buf_to_fill + 1) %
QDIO_MAX_BUFFERS_PER_Q;
buffer = queue->bufs[queue->next_buf_to_fill];
- /* we did a step forward, so check buffer state
- * again */
+
+ /* We stepped forward, so sanity-check again: */
if (atomic_read(&buffer->state) !=
QETH_QDIO_BUF_EMPTY) {
qeth_flush_buffers(queue, start_index,
@@ -3983,8 +3982,18 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
}
}
- flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset,
- hd_len);
+ if (buffer->next_element_to_fill == 0 &&
+ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+		/* If a TX completion happens right _here_ and fails to wake
+ * the txq, then our re-check below will catch the race.
+ */
+ QETH_TXQ_STAT_INC(queue, stopped);
+ netif_tx_stop_queue(txq);
+ stopped = true;
+ }
+
+ flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
+ stopped);
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
else if (!atomic_read(&queue->set_pci_flags_count))
@@ -4015,6 +4024,8 @@ out:
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_send_packet);
@@ -4101,9 +4112,6 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
if (!push_len)
kmem_cache_free(qeth_core_header_cache, hdr);
- if (rc == -EBUSY)
- /* roll back to ETH header */
- skb_pull(skb, push_len);
}
return rc;
}
@@ -4321,9 +4329,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
QETH_CARD_TEXT(card, 4, "setactlo");
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX) &&
- qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
+ if ((IS_OSD(card) || IS_OSX(card)) &&
+ qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
@@ -4348,7 +4355,6 @@ void qeth_tx_timeout(struct net_device *dev)
card = dev->ml_priv;
QETH_CARD_TEXT(card, 4, "txtimeo");
- QETH_CARD_STAT_INC(card, tx_errors);
qeth_schedule_recovery(card);
}
EXPORT_SYMBOL_GPL(qeth_tx_timeout);
@@ -4489,7 +4495,7 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
QETH_CARD_TEXT(card, 3, "snmpcmd");
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
@@ -4732,14 +4738,6 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
-static int qeth_get_qdio_q_format(struct qeth_card *card)
-{
- if (card->info.type == QETH_CARD_TYPE_IQD)
- return QDIO_IQDIO_QFMT;
- else
- return QDIO_QETH_QFMT;
-}
-
static void qeth_determine_capabilities(struct qeth_card *card)
{
int rc;
@@ -4878,7 +4876,8 @@ static int qeth_qdio_establish(struct qeth_card *card)
memset(&init_data, 0, sizeof(struct qdio_initialize));
init_data.cdev = CARD_DDEV(card);
- init_data.q_format = qeth_get_qdio_q_format(card);
+ init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
+ QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
init_data.qib_param_field = qib_param_field;
init_data.no_input_qs = card->qdio.no_in_queues;
@@ -4890,8 +4889,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
init_data.output_sbal_state_array = card->qdio.out_bufstates;
- init_data.scan_threshold =
- (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32;
+ init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -4937,7 +4935,7 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
destroy_workqueue(card->event_wq);
- qeth_free_qdio_buffers(card);
+ qeth_free_qdio_queues(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);
kfree(card);
@@ -4986,12 +4984,14 @@ int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
atomic_set(&card->force_alloc_skb, 0);
- qeth_update_from_chp_desc(card);
+ rc = qeth_update_from_chp_desc(card);
+ if (rc)
+ return rc;
retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
CARD_DEVID(card));
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ rc = qeth_qdio_clear_card(card, !IS_IQD(card));
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
@@ -5019,8 +5019,9 @@ retriable:
qeth_determine_capabilities(card);
qeth_init_tokens(card);
qeth_init_func_level(card);
- rc = qeth_idx_activate_channel(card, &card->read, qeth_idx_read_cb);
- if (rc == -ERESTARTSYS) {
+
+ rc = qeth_idx_activate_read_channel(card);
+ if (rc == -EINTR) {
QETH_DBF_TEXT(SETUP, 2, "break2");
return rc;
} else if (rc) {
@@ -5030,8 +5031,9 @@ retriable:
else
goto retry;
}
- rc = qeth_idx_activate_channel(card, &card->write, qeth_idx_write_cb);
- if (rc == -ERESTARTSYS) {
+
+ rc = qeth_idx_activate_write_channel(card);
+ if (rc == -EINTR) {
QETH_DBF_TEXT(SETUP, 2, "break3");
return rc;
} else if (rc) {
@@ -5171,7 +5173,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
return NULL;
if (((skb_len >= card->options.rx_sg_cb) &&
- (!(card->info.type == QETH_CARD_TYPE_OSN)) &&
+ !IS_OSN(card) &&
(!atomic_read(&card->force_alloc_skb))) ||
(card->options.cq == QETH_CQ_ENABLED))
use_rx_sg = 1;
@@ -5562,13 +5564,17 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
- dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup);
+ dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
+ ether_setup, QETH_MAX_QUEUES, 1);
+ break;
+ case QETH_CARD_TYPE_OSM:
+ dev = alloc_etherdev(0);
break;
case QETH_CARD_TYPE_OSN:
dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
break;
default:
- dev = alloc_etherdev(0);
+ dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
}
if (!dev)
@@ -5590,8 +5596,10 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
- if (IS_IQD(card))
+ if (IS_IQD(card)) {
+ netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
dev->features |= NETIF_F_SG;
+ }
}
return dev;
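
Multi-queue support starts at netdev allocation: both IQD and OSA devices now register QETH_MAX_QUEUES TX queues (and a single RX queue) up front, IQD devices initially expose only QETH_IQD_MIN_TXQ of them, and qeth_osa_set_output_queues() later trims real_num_tx_queues to what the CHP descriptor allows. The allocation boils down to (same calls and constants as in the hunk above):

    dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup,
    		       QETH_MAX_QUEUES, 1);	/* TX queues, RX queues */
    if (dev && IS_IQD(card))
    	netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
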
@@ -5641,14 +5649,16 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
}
qeth_setup_card(card);
- qeth_update_from_chp_desc(card);
-
card->dev = qeth_alloc_netdev(card);
if (!card->dev) {
rc = -ENOMEM;
goto err_card;
}
+ card->qdio.no_out_queues = card->dev->num_tx_queues;
+ rc = qeth_update_from_chp_desc(card);
+ if (rc)
+ goto err_chp_desc;
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
@@ -5661,9 +5671,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
if (rc)
goto err_load;
- gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
- ? card->discipline->devtype
- : &qeth_osn_devtype;
+ gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
+ card->discipline->devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
@@ -5675,6 +5684,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
err_disc:
qeth_core_free_discipline(card);
err_load:
+err_chp_desc:
free_netdev(card->dev);
err_card:
qeth_core_free_card(card);
@@ -5706,10 +5716,8 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
enum qeth_discipline_id def_discipline;
if (!card->discipline) {
- if (card->info.type == QETH_CARD_TYPE_IQD)
- def_discipline = QETH_DISCIPLINE_LAYER3;
- else
- def_discipline = QETH_DISCIPLINE_LAYER2;
+ def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
+ QETH_DISCIPLINE_LAYER2;
rc = qeth_core_load_discipline(card, def_discipline);
if (rc)
goto err;
@@ -5737,7 +5745,7 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev)
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qdio_free(CARD_DDEV(card));
}
@@ -5837,13 +5845,10 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_GET_CARD_TYPE:
- if ((card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX) &&
- !card->info.guestlan)
+ if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
+ !IS_VM_NIC(card))
return 1;
- else
- return 0;
+ return 0;
case SIOCGMIIPHY:
mii_data = if_mii(rq);
mii_data->phy_id = 0;
@@ -6193,7 +6198,6 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->rx_errors = card->stats.rx_errors;
stats->rx_dropped = card->stats.rx_dropped;
stats->multicast = card->stats.rx_multicast;
- stats->tx_errors = card->stats.tx_errors;
for (i = 0; i < card->qdio.no_out_queues; i++) {
queue = card->qdio.out_qs[i];
@@ -6206,6 +6210,15 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);
+u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ u8 cast_type, struct net_device *sb_dev)
+{
+ if (cast_type != RTN_UNICAST)
+ return QETH_IQD_MCAST_TXQ;
+ return QETH_IQD_MIN_UCAST_TXQ;
+}
+EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
+
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -6216,7 +6229,7 @@ int qeth_open(struct net_device *dev)
return -EIO;
card->data.state = CH_STATE_UP;
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
napi_enable(&card->napi);
local_bh_disable();
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index f8c5d4a9be13..f5237b7c14c4 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -82,7 +82,7 @@ enum qeth_card_types {
#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM)
#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN)
#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX)
-#define IS_VM_NIC(card) ((card)->info.guestlan)
+#define IS_VM_NIC(card) ((card)->info.is_vm_nic)
#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18
/* only the first two bytes are looked at in qeth_get_cardname_short */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 56deeb6f7bc0..9f392497d570 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -198,6 +198,9 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
if (!card)
return -EINVAL;
+ if (IS_IQD(card))
+ return -EOPNOTSUPP;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
@@ -239,10 +242,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 2;
} else if (sysfs_streq(buf, "no_prio_queueing:3")) {
- if (card->info.type == QETH_CARD_TYPE_IQD) {
- rc = -EPERM;
- goto out;
- }
card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING;
card->qdio.default_out_queue = 3;
} else if (sysfs_streq(buf, "no_prio_queueing")) {
@@ -480,8 +479,7 @@ static ssize_t qeth_dev_isolation_store(struct device *dev,
return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->info.type != QETH_CARD_TYPE_OSD &&
- card->info.type != QETH_CARD_TYPE_OSX) {
+ if (!IS_OSD(card) && !IS_OSX(card)) {
rc = -EOPNOTSUPP;
dev_err(&card->gdev->dev, "Adapter does not "
"support QDIO data connection isolation\n");
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 93a53fed4cf8..4166eb29f0bd 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -38,6 +38,7 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail),
QETH_TXQ_STAT("TSO bytes", tso_bytes),
QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+ QETH_TXQ_STAT("Queue stopped", stopped),
};
static const struct qeth_stats card_stats[] = {
@@ -154,6 +155,21 @@ static void qeth_get_drvinfo(struct net_device *dev,
CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
+static void qeth_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ channels->max_rx = dev->num_rx_queues;
+ channels->max_tx = card->qdio.no_out_queues;
+ channels->max_other = 0;
+ channels->max_combined = 0;
+ channels->rx_count = dev->real_num_rx_queues;
+ channels->tx_count = dev->real_num_tx_queues;
+ channels->other_count = 0;
+ channels->combined_count = 0;
+}
+
/* Helper function to fill 'advertising' and 'supported' which are the same. */
/* Autoneg and full-duplex are supported and advertised unconditionally. */
/* Always advertise and support all speeds up to specified, and only one */
@@ -359,6 +375,7 @@ const struct ethtool_ops qeth_ethtool_ops = {
.get_ethtool_stats = qeth_get_ethtool_stats,
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
+ .get_channels = qeth_get_channels,
.get_link_ksettings = qeth_get_link_ksettings,
};
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c3067fd3bd9e..218801232ca2 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -149,29 +149,16 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
return rc;
}
-static void qeth_l2_del_all_macs(struct qeth_card *card)
+static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
int i;
- spin_lock_bh(&card->mclock);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
hash_del(&mac->hnode);
kfree(mac);
}
- spin_unlock_bh(&card->mclock);
-}
-
-static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
-{
- if (card->info.type == QETH_CARD_TYPE_OSN)
- return RTN_UNICAST;
- if (is_broadcast_ether_addr(skb->data))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(skb->data))
- return RTN_MULTICAST;
- return RTN_UNICAST;
}
static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
@@ -292,14 +279,16 @@ static void qeth_l2_stop_card(struct qeth_card *card)
qeth_set_allowed_threads(card, 0, 1);
+ cancel_work_sync(&card->rx_mode_work);
+ qeth_l2_drain_rx_mode_cache(card);
+
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
if (card->state == CARD_STATE_HARDSETUP) {
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
@@ -334,13 +323,11 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
case QETH_HEADER_TYPE_LAYER2:
skb->protocol = eth_type_trans(skb, skb->dev);
qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]);
- if (skb->protocol == htons(ETH_P_802_2))
- *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
len = skb->len;
napi_gro_receive(&card->napi, skb);
break;
case QETH_HEADER_TYPE_OSN:
- if (card->info.type == QETH_CARD_TYPE_OSN) {
+ if (IS_OSN(card)) {
skb_push(skb, sizeof(struct qeth_hdr));
skb_copy_to_linear_data(skb, hdr,
sizeof(struct qeth_hdr));
@@ -391,8 +378,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
}
/* some devices don't support a custom MAC address: */
- if (card->info.type == QETH_CARD_TYPE_OSM ||
- card->info.type == QETH_CARD_TYPE_OSX)
+ if (IS_OSM(card) || IS_OSX(card))
return (rc) ? rc : -EADDRNOTAVAIL;
eth_hw_addr_random(card->dev);
@@ -515,9 +501,11 @@ static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha)
hash_add(card->mac_htable, &mac->hnode, mac_hash);
}
-static void qeth_l2_set_rx_mode(struct net_device *dev)
+static void qeth_l2_rx_mode_work(struct work_struct *work)
{
- struct qeth_card *card = dev->ml_priv;
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ rx_mode_work);
+ struct net_device *dev = card->dev;
struct netdev_hw_addr *ha;
struct qeth_mac *mac;
struct hlist_node *tmp;
@@ -526,12 +514,12 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
QETH_CARD_TEXT(card, 3, "setmulti");
- spin_lock_bh(&card->mclock);
-
+ netif_addr_lock_bh(dev);
netdev_for_each_mc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mac(card, ha);
+ netif_addr_unlock_bh(dev);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
switch (mac->disp_flag) {
@@ -554,8 +542,6 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
}
}
- spin_unlock_bh(&card->mclock);
-
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card);
else
@@ -586,7 +572,7 @@ static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb,
}
elements += qeth_count_elements(skb, hd_len);
- if (elements > QETH_MAX_BUFFER_ELEMENTS(card)) {
+ if (elements > queue->max_elements) {
rc = -E2BIG;
goto out;
}
@@ -603,37 +589,45 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
- int cast_type = qeth_l2_get_cast_type(card, skb);
- int ipv = qeth_get_ip_version(skb);
+ u16 txq = skb_get_queue_mapping(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
int rc;
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
-
- netif_stop_queue(dev);
+ if (IS_IQD(card))
+ txq = qeth_iqd_translate_txq(dev, txq);
+ queue = card->qdio.out_qs[txq];
if (IS_OSN(card))
rc = qeth_l2_xmit_osn(card, skb, queue);
else
- rc = qeth_xmit(card, skb, queue, ipv, cast_type,
+ rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+ qeth_get_ether_cast_type(skb),
qeth_l2_fill_header);
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
}
+static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ if (IS_IQD(card))
+ return qeth_iqd_select_queue(dev, skb,
+ qeth_get_ether_cast_type(skb),
+ sb_dev);
+ return qeth_get_priority_queue(card, skb);
+}
+
static const struct device_type qeth_l2_devtype = {
.name = "qeth_layer2",
.groups = qeth_l2_attr_groups,
@@ -653,6 +647,7 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
}
hash_init(card->mac_htable);
+ INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
return 0;
}
@@ -673,12 +668,20 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
unregister_netdev(card->dev);
}
+static void qeth_l2_set_rx_mode(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ schedule_work(&card->rx_mode_work);
+}
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l2_hard_start_xmit,
.ndo_features_check = qeth_features_check,
+ .ndo_select_queue = qeth_l2_select_queue,
.ndo_validate_addr = qeth_l2_validate_addr,
.ndo_set_rx_mode = qeth_l2_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -721,7 +724,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
- if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
+ if (IS_OSD(card) && !IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
/* OSA 3S and earlier has no RX/TX support */
if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) {
@@ -831,8 +834,7 @@ static int qeth_l2_set_online(struct ccwgroup_device *gdev)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- if ((card->info.type == QETH_CARD_TYPE_OSD) ||
- (card->info.type == QETH_CARD_TYPE_OSX)) {
+ if (IS_OSD(card) || IS_OSX(card)) {
rc = qeth_l2_start_ipassists(card);
if (rc)
goto out_remove;
@@ -1042,13 +1044,13 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len,
QETH_CARD_TEXT(card, 5, "osndctrd");
- wait_event(card->wait_q,
- atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0);
- qeth_prepare_control_data(card, len, iob);
+ wait_event(card->wait_q, qeth_trylock_channel(channel));
+ iob->finalize(card, iob, len);
+ QETH_DBF_HEX(CTRL, 2, iob->data, min(len, QETH_DBF_CTRL_LEN));
QETH_CARD_TEXT(card, 6, "osnoirqp");
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw,
- (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT);
+ (addr_t) iob, 0, 0, iob->timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: "
@@ -1456,9 +1458,8 @@ static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card,
enum qeth_ipa_sbp_cmd sbp_cmd,
unsigned int cmd_length)
{
- enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ?
- IPA_CMD_SETBRIDGEPORT_IQD :
- IPA_CMD_SETBRIDGEPORT_OSA;
+ enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? IPA_CMD_SETBRIDGEPORT_IQD :
+ IPA_CMD_SETBRIDGEPORT_OSA;
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 53712cf26406..0271833da6a2 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -246,9 +246,9 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
*/
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
@@ -268,6 +268,30 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
return rc;
}
+static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr,
+ bool add)
+{
+ int rc;
+
+ mutex_lock(&card->ip_lock);
+ rc = add ? qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr);
+ mutex_unlock(&card->ip_lock);
+
+ return rc;
+}
+
+static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card)
+{
+ struct qeth_ipaddr *addr;
+ struct hlist_node *tmp;
+ int i;
+
+ hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
+ hash_del(&addr->hnode);
+ kfree(addr);
+ }
+}
+
static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
{
struct qeth_ipaddr *addr;
@@ -276,7 +300,7 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
QETH_CARD_TEXT(card, 4, "clearip");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (!recover) {
@@ -287,19 +311,9 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
addr->disp_flag = QETH_DISP_ADDR_ADD;
}
- spin_unlock_bh(&card->ip_lock);
-
- spin_lock_bh(&card->mclock);
-
- hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) {
- hash_del(&addr->hnode);
- kfree(addr);
- }
-
- spin_unlock_bh(&card->mclock);
-
-
+ mutex_unlock(&card->ip_lock);
}
+
static void qeth_l3_recover_ip(struct qeth_card *card)
{
struct qeth_ipaddr *addr;
@@ -309,15 +323,15 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
QETH_CARD_TEXT(card, 4, "recovrip");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
rc = qeth_l3_register_addr_entry(card, addr);
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
addr->in_progress = 0;
} else
rc = qeth_l3_register_addr_entry(card, addr);
@@ -333,8 +347,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
}
}
- spin_unlock_bh(&card->ip_lock);
-
+ mutex_unlock(&card->ip_lock);
}
static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
@@ -461,7 +474,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
static int qeth_l3_correct_routing_type(struct qeth_card *card,
enum qeth_routing_types *type, enum qeth_prot_versions prot)
{
- if (card->info.type == QETH_CARD_TYPE_IQD) {
+ if (IS_IQD(card)) {
switch (*type) {
case NO_ROUTER:
case PRIMARY_CONNECTOR:
@@ -559,7 +572,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
{
struct qeth_ipato_entry *ipatoe, *tmp;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
list_del(&ipatoe->entry);
@@ -567,7 +580,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
}
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
int qeth_l3_add_ipato_entry(struct qeth_card *card,
@@ -578,7 +591,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
@@ -596,7 +609,7 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
qeth_l3_update_ipato(card);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
return rc;
}
@@ -610,7 +623,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "delipato");
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
@@ -625,7 +638,7 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
return rc;
}
@@ -634,7 +647,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
- int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -642,16 +654,13 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- spin_lock_bh(&card->ip_lock);
- rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
- spin_unlock_bh(&card->ip_lock);
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
{
struct qeth_ipaddr addr;
- int rc, i;
+ unsigned int i;
qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
addr.u.a6.addr.s6_addr[0] = 0xfe;
@@ -659,10 +668,7 @@ int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
for (i = 0; i < 8; i++)
addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i];
- spin_lock_bh(&card->ip_lock);
- rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr);
- spin_unlock_bh(&card->ip_lock);
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
static int qeth_l3_register_addr_entry(struct qeth_card *card,
@@ -848,7 +854,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "softipv6");
- if (card->info.type == QETH_CARD_TYPE_IQD)
+ if (IS_IQD(card))
goto out;
rc = qeth_send_simple_setassparms(card, IPA_IPV6,
@@ -1374,8 +1380,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
switch (hdr->hdr.l3.id) {
case QETH_HEADER_TYPE_LAYER3:
magic = *(__u16 *)skb->data;
- if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- (magic == ETH_P_AF_IUCV)) {
+ if (IS_IQD(card) && magic == ETH_P_AF_IUCV) {
len = skb->len;
dev_hard_header(skb, dev, ETH_P_AF_IUCV,
dev->dev_addr, "FAKELL", len);
@@ -1413,6 +1418,9 @@ static void qeth_l3_stop_card(struct qeth_card *card)
qeth_set_allowed_threads(card, 0, 1);
+ cancel_work_sync(&card->rx_mode_work);
+ qeth_l3_drain_rx_mode_cache(card);
+
if (card->options.sniffer &&
(card->info.promisc_mode == SET_PROMISC_MODE_ON))
qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
@@ -1424,7 +1432,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
}
if (card->state == CARD_STATE_HARDSETUP) {
qeth_qdio_clear_card(card, 0);
- qeth_clear_qdio_buffers(card);
+ qeth_drain_output_queues(card);
qeth_clear_working_pool_list(card);
card->state = CARD_STATE_DOWN;
}
@@ -1451,7 +1459,7 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
(card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
return;
- if (card->info.guestlan) { /* Guestlan trace */
+ if (IS_VM_NIC(card)) { /* Guestlan trace */
if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
qeth_setadp_promisc_mode(card);
} else if (card->options.sniffer && /* HiperSockets trace */
@@ -1466,9 +1474,10 @@ qeth_l3_handle_promisc_mode(struct qeth_card *card)
}
}
-static void qeth_l3_set_rx_mode(struct net_device *dev)
+static void qeth_l3_rx_mode_work(struct work_struct *work)
{
- struct qeth_card *card = dev->ml_priv;
+ struct qeth_card *card = container_of(work, struct qeth_card,
+ rx_mode_work);
struct qeth_ipaddr *addr;
struct hlist_node *tmp;
int i, rc;
@@ -1476,8 +1485,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
QETH_CARD_TEXT(card, 3, "setmulti");
if (!card->options.sniffer) {
- spin_lock_bh(&card->mclock);
-
qeth_l3_add_multicast_ipv4(card);
qeth_l3_add_multicast_ipv6(card);
@@ -1505,8 +1512,6 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
}
}
- spin_unlock_bh(&card->mclock);
-
if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
return;
}
@@ -1551,7 +1556,7 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1783,7 +1788,7 @@ static int qeth_l3_arp_modify_entry(struct qeth_card *card,
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan)
+ if (IS_VM_NIC(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1816,7 +1821,7 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
* IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE;
* thus we say EOPNOTSUPP for this ARP function
*/
- if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD))
+ if (IS_VM_NIC(card) || IS_IQD(card))
return -EOPNOTSUPP;
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
return -EOPNOTSUPP;
@@ -1913,13 +1918,7 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
RTN_MULTICAST : RTN_UNICAST;
default:
/* ... and MAC address */
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
- skb->dev->broadcast))
- return RTN_BROADCAST;
- if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
- return RTN_MULTICAST;
- /* default to unicast */
- return RTN_UNICAST;
+ return qeth_get_ether_cast_type(skb);
}
}
@@ -1977,19 +1976,14 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI);
}
+ l3_hdr->flags = qeth_l3_cast_type_to_flag(cast_type);
+
/* OSA only: */
if (!ipv) {
- hdr->hdr.l3.flags = QETH_HDR_PASSTHRU;
- if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest,
- skb->dev->broadcast))
- hdr->hdr.l3.flags |= QETH_CAST_BROADCAST;
- else
- hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ?
- QETH_CAST_MULTICAST : QETH_CAST_UNICAST;
+ l3_hdr->flags |= QETH_HDR_PASSTHRU;
return;
}
- hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type);
rcu_read_lock();
if (ipv == 4) {
struct rtable *rt = skb_rtable(skb);
@@ -2007,7 +2001,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
l3_hdr->next_hop.ipv6_addr = ipv6_hdr(skb)->daddr;
hdr->hdr.l3.flags |= QETH_HDR_IPV6;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU;
}
rcu_read_unlock();
@@ -2030,7 +2024,6 @@ static void qeth_l3_fixup_headers(struct sk_buff *skb)
static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
struct qeth_qdio_out_q *queue, int ipv, int cast_type)
{
- unsigned char eth_hdr[ETH_HLEN];
unsigned int hw_hdr_len;
int rc;
@@ -2040,45 +2033,44 @@ static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb,
rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN);
if (rc)
return rc;
- skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN);
skb_pull(skb, ETH_HLEN);
qeth_l3_fixup_headers(skb);
- rc = qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
- if (rc == -EBUSY) {
- /* roll back to ETH header */
- skb_push(skb, ETH_HLEN);
- skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN);
- }
- return rc;
+ return qeth_xmit(card, skb, queue, ipv, cast_type, qeth_l3_fill_header);
}
static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- int cast_type = qeth_l3_get_cast_type(skb);
struct qeth_card *card = dev->ml_priv;
+ u16 txq = skb_get_queue_mapping(skb);
int ipv = qeth_get_ip_version(skb);
struct qeth_qdio_out_q *queue;
int tx_bytes = skb->len;
- int rc;
-
- queue = qeth_get_tx_queue(card, skb, ipv, cast_type);
+ int cast_type, rc;
if (IS_IQD(card)) {
+ queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
if (card->options.sniffer)
goto tx_drop;
if ((card->options.cq != QETH_CQ_ENABLED && !ipv) ||
(card->options.cq == QETH_CQ_ENABLED &&
skb->protocol != htons(ETH_P_AF_IUCV)))
goto tx_drop;
+
+ if (txq == QETH_IQD_MCAST_TXQ)
+ cast_type = qeth_l3_get_cast_type(skb);
+ else
+ cast_type = RTN_UNICAST;
+ } else {
+ queue = card->qdio.out_qs[txq];
+ cast_type = qeth_l3_get_cast_type(skb);
}
if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable)
goto tx_drop;
- netif_stop_queue(dev);
-
if (ipv == 4 || IS_IQD(card))
rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type);
else
@@ -2088,19 +2080,22 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
if (!rc) {
QETH_TXQ_STAT_INC(queue, tx_packets);
QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
- } else if (rc == -EBUSY) {
- return NETDEV_TX_BUSY;
- } /* else fall through */
+ }
tx_drop:
QETH_TXQ_STAT_INC(queue, tx_dropped);
kfree_skb(skb);
- netif_wake_queue(dev);
return NETDEV_TX_OK;
}
+static void qeth_l3_set_rx_mode(struct net_device *dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ schedule_work(&card->rx_mode_work);
+}
+
/*
* we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting
* NOARP on the netdevice is no option because it also turns off neighbor
@@ -2134,11 +2129,27 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
return qeth_features_check(skb, dev, features);
}
+static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb),
+ sb_dev);
+}
+
+static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qeth_card *card = dev->ml_priv;
+
+ return qeth_get_priority_queue(card, skb);
+}
+
static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
+ .ndo_select_queue = qeth_l3_iqd_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2155,6 +2166,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_get_stats64 = qeth_get_stats64,
.ndo_start_xmit = qeth_l3_hard_start_xmit,
.ndo_features_check = qeth_l3_osa_features_check,
+ .ndo_select_queue = qeth_l3_osa_select_queue,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_do_ioctl = qeth_do_ioctl,
@@ -2171,8 +2183,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
unsigned int headroom;
int rc;
- if (card->info.type == QETH_CARD_TYPE_OSD ||
- card->info.type == QETH_CARD_TYPE_OSX) {
+ if (IS_OSD(card) || IS_OSX(card)) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR)) {
pr_info("qeth_l3: ignoring TR device\n");
@@ -2186,7 +2197,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD))
card->dev->dev_id = card->info.unique_id & 0xffff;
- if (!card->info.guestlan) {
+ if (!IS_VM_NIC(card)) {
card->dev->features |= NETIF_F_SG;
card->dev->hw_features |= NETIF_F_TSO |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
@@ -2210,7 +2221,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
headroom = sizeof(struct qeth_hdr_tso);
else
headroom = sizeof(struct qeth_hdr) + VLAN_HLEN;
- } else if (card->info.type == QETH_CARD_TYPE_IQD) {
+ } else if (IS_IQD(card)) {
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
headroom = sizeof(struct qeth_hdr) - ETH_HLEN;
@@ -2253,14 +2264,22 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
int rc;
hash_init(card->ip_htable);
+ mutex_init(&card->ip_lock);
+ card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0,
+ dev_name(&gdev->dev));
+ if (!card->cmd_wq)
+ return -ENOMEM;
if (gdev->dev.type == &qeth_generic_devtype) {
rc = qeth_l3_create_device_attributes(&gdev->dev);
- if (rc)
+ if (rc) {
+ destroy_workqueue(card->cmd_wq);
return rc;
+ }
}
hash_init(card->ip_mc_htable);
+ INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work);
return 0;
}
@@ -2280,6 +2299,9 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
+
+ flush_workqueue(card->cmd_wq);
+ destroy_workqueue(card->cmd_wq);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
@@ -2517,20 +2539,40 @@ static int qeth_l3_handle_ip_event(struct qeth_card *card,
{
switch (event) {
case NETDEV_UP:
- spin_lock_bh(&card->ip_lock);
- qeth_l3_add_ip(card, addr);
- spin_unlock_bh(&card->ip_lock);
+ qeth_l3_modify_ip(card, addr, true);
return NOTIFY_OK;
case NETDEV_DOWN:
- spin_lock_bh(&card->ip_lock);
- qeth_l3_delete_ip(card, addr);
- spin_unlock_bh(&card->ip_lock);
+ qeth_l3_modify_ip(card, addr, false);
return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
}
+struct qeth_l3_ip_event_work {
+ struct work_struct work;
+ struct qeth_card *card;
+ struct qeth_ipaddr addr;
+};
+
+#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work)
+
+static void qeth_l3_add_ip_worker(struct work_struct *work)
+{
+ struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
+
+ qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true);
+ kfree(work);
+}
+
+static void qeth_l3_delete_ip_worker(struct work_struct *work)
+{
+ struct qeth_l3_ip_event_work *ip_work = to_ip_work(work);
+
+ qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false);
+ kfree(work);
+}
+
static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
{
if (is_vlan_dev(dev))
@@ -2575,9 +2617,12 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
{
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct net_device *dev = ifa->idev->dev;
- struct qeth_ipaddr addr;
+ struct qeth_l3_ip_event_work *ip_work;
struct qeth_card *card;
+ if (event != NETDEV_UP && event != NETDEV_DOWN)
+ return NOTIFY_DONE;
+
card = qeth_l3_get_card_from_dev(dev);
if (!card)
return NOTIFY_DONE;
@@ -2585,11 +2630,23 @@ static int qeth_l3_ip6_event(struct notifier_block *this,
if (!qeth_is_supported(card, IPA_IPV6))
return NOTIFY_DONE;
- qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6);
- addr.u.a6.addr = ifa->addr;
- addr.u.a6.pfxlen = ifa->prefix_len;
+ ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC);
+ if (!ip_work)
+ return NOTIFY_DONE;
- return qeth_l3_handle_ip_event(card, &addr, event);
+ if (event == NETDEV_UP)
+ INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker);
+ else
+ INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker);
+
+ ip_work->card = card;
+ qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL,
+ QETH_PROT_IPV6);
+ ip_work->addr.u.a6.addr = ifa->addr;
+ ip_work->addr.u.a6.pfxlen = ifa->prefix_len;
+
+ queue_work(card->cmd_wq, &ip_work->work);
+ return NOTIFY_OK;
}
static struct notifier_block qeth_l3_ip6_notifier = {
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index cff518b0f904..2f73b33c9347 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -206,7 +206,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
if (card->options.cq == QETH_CQ_ENABLED)
return -EPERM;
@@ -258,7 +258,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
@@ -276,7 +276,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
if (!card)
return -EINVAL;
- if (card->info.type != QETH_CARD_TYPE_IQD)
+ if (!IS_IQD(card))
return -EPERM;
if (card->state != CARD_STATE_DOWN)
return -EPERM;
@@ -367,9 +367,9 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -412,9 +412,9 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -436,7 +436,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
/* add strlen for "/<mask>\n" */
entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
@@ -449,7 +449,7 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
i += snprintf(buf + i, PAGE_SIZE - i,
"%s/%i\n", addr_str, ipatoe->mask_bits);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
i += snprintf(buf + i, PAGE_SIZE - i, "\n");
return i;
@@ -598,9 +598,9 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
}
out:
mutex_unlock(&card->conf_mutex);
@@ -684,7 +684,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
- spin_lock_bh(&card->ip_lock);
+ mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
@@ -698,7 +698,7 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
addr_str);
}
- spin_unlock_bh(&card->ip_lock);
+ mutex_unlock(&card->ip_lock);
str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
return str_len;
diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
index f51f150307df..ffa379efff83 100644
--- a/drivers/ssb/bridge_pcmcia_80211.c
+++ b/drivers/ssb/bridge_pcmcia_80211.c
@@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
.resume = ssb_host_pcmcia_resume,
};
+static int pcmcia_init_failed;
+
/*
* These are not module init/exit functions!
* The module_pcmcia_driver() helper cannot be used here.
*/
int ssb_host_pcmcia_init(void)
{
- return pcmcia_register_driver(&ssb_host_pcmcia_driver);
+ pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
+
+ return pcmcia_init_failed;
}
void ssb_host_pcmcia_exit(void)
{
- pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
+ if (!pcmcia_init_failed)
+ pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
}
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 986db76705cc..8847a11c212f 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -421,7 +421,7 @@ int cvm_oct_common_init(struct net_device *dev)
if (priv->of_node)
mac = of_get_mac_address(priv->of_node);
- if (mac)
+ if (!IS_ERR_OR_NULL(mac))
ether_addr_copy(dev->dev_addr, mac);
else
eth_hw_addr_random(dev);
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 8dde5a40e253..2c088af44c8b 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -245,8 +245,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index 9021bf519605..8a9d838af24e 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -401,8 +401,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9704b135a7bc..40b29ca5a98d 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -441,26 +441,26 @@ static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
static const struct genl_ops tcmu_genl_ops[] = {
{
.cmd = TCMU_CMD_SET_FEATURES,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_set_features,
},
{
.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_add_dev_done,
},
{
.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_rm_dev_done,
},
{
.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM,
- .policy = tcmu_attr_policy,
.doit = tcmu_genl_reconfig_dev_done,
},
};
@@ -472,6 +472,7 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.name = "TCM-USER",
.version = 2,
.maxattr = TCMU_ATTR_MAX,
+ .policy = tcmu_attr_policy,
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,