author		Linus Torvalds <torvalds@linux-foundation.org>	2023-10-31 05:10:11 -1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-10-31 05:10:11 -1000
commit		89ed67ef126c4160349c1b96fdb775ea6170ac90 (patch)
tree		98caaf8bba44b21f9345a0af1dd2bd9987764e27 /drivers/net/ethernet
parent		5a6a09e97199d6600d31383055f9d43fbbcbe86f (diff)
parent		f1c73396133cb3d913e2075298005644ee8dfade (diff)
Merge tag 'net-next-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking updates from Jakub Kicinski:
 "Core & protocols:

   - Support usec resolution of TCP timestamps, enabled selectively by a route attribute.
   - Defer regular TCP ACK while processing socket backlog, try to send a cumulative ACK at the end. Increase single TCP flow performance on a 200Gbit NIC by 20% (100Gbit -> 120Gbit).
   - The Fair Queuing (FQ) packet scheduler:
       - add built-in 3 band prio / WRR scheduling
       - support bypass if the qdisc is mostly idle (5% speed up for TCP RR)
       - improve inactive flow reporting
       - optimize the layout of structures for better cache locality
   - Support TCP Authentication Option (RFC 5925, TCP-AO), a more modern replacement for the old MD5 option.
   - Add more retransmission timeout (RTO) related statistics to TCP_INFO.
   - Support sending fragmented skbs over vsock sockets.
   - Make sure we send SIGPIPE for vsock sockets if socket was shutdown().
   - Add sysctl for ignoring lower limit on lifetime in Router Advertisement PIO, based on an in-progress IETF draft.
   - Add sysctl to control activation of TCP ping-pong mode.
   - Add sysctl to make connection timeout in MPTCP configurable.
   - Support rcvlowat and notsent_lowat on MPTCP sockets, to help apps limit the number of wakeups.
   - Support netlink GET for MDB (multicast forwarding), allowing user space to request a single MDB entry instead of dumping the entire table.
   - Support selective FDB flushing in the VXLAN tunnel driver.
   - Allow limiting learned FDB entries in bridges, prevent OOM attacks.
   - Allow controlling via configfs netconsole targets which were created via the kernel cmdline at boot, rather than via configfs at runtime.
   - Support multiple PTP timestamp event queue readers with different filters.
   - MCTP over I3C.

  BPF:

   - Add new veth-like netdevice where BPF program defines the logic of the xmit routine. It can operate in L3 and L2 mode.
   - Support exceptions - allow asserting conditions which should never be true but are hard for the verifier to infer. With some extra flexibility around handling of the exit / failure: https://lwn.net/Articles/938435/
   - Add support for local per-cpu kptr, allow allocating and storing per-cpu objects in maps. Access to those objects operates on the value for the current CPU. This allows to deprecate local one-off implementations of per-CPU storage like BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE maps.
   - Extend cgroup BPF sockaddr hooks for UNIX sockets. The use case is for systemd to re-implement the LogNamespace feature which allows running multiple instances of systemd-journald to process the logs of different services.
   - Enable open-coded task_vma iteration, after maple tree conversion made it hard to directly walk VMAs in tracing programs.
   - Add open-coded task, css_task and css iterator support. One of the use cases is customizable OOM victim selection via BPF.
   - Allow source address selection with bpf_*_fib_lookup().
   - Add ability to pin BPF timer to the current CPU.
   - Prevent creation of infinite loops by combining tail calls and fentry/fexit programs.
   - Add missed stats for kprobes to retrieve the number of missed kprobe executions and subsequent executions of BPF programs.
   - Inherit system settings for CPU security mitigations.
   - Add BPF v4 CPU instruction support for arm32 and s390x.

  Changes to common code:

   - overflow: add DEFINE_FLEX() for on-stack definition of structs with flexible array members.
   - Process doc update with more guidance for reviewers.

  Driver API:

   - Simplify locking in WiFi (cfg80211 and mac80211 layers), use wiphy mutex in most places and remove a lot of smaller locks.
   - Create a common DPLL configuration API. Allow configuring and querying state of PLL circuits used for clock syntonization, in network time distribution.
   - Unify fragmented and full page allocation APIs in page pool code. Let drivers be ignorant of PAGE_SIZE.
   - Rework PHY state machine to avoid races with calls to phy_stop().
   - Notify DSA drivers of MAC address changes on user ports, improve correctness of offloads which depend on matching port MAC addresses.
   - Allow antenna control on injected WiFi frames.
   - Reduce the number of variants of napi_schedule().
   - Simplify error handling when composing devlink health messages.

  Misc:

   - A lot of KCSAN data race "fixes", from Eric.
   - A lot of __counted_by() annotations, from Kees.
   - A lot of strncpy -> strscpy and printf format fixes.
   - Replace master/slave terminology with conduit/user in DSA drivers.
   - Handful of KUnit tests for netdev and WiFi core.

  Removed:

   - AppleTalk COPS.
   - AppleTalk ipddp.
   - TI AR7 CPMAC Ethernet driver.

  Drivers:

   - Ethernet high-speed NICs:
      - Intel (100G, ice, idpf):
         - add a driver for the Intel E2000 IPUs
         - make CRC/FCS stripping configurable
         - cross-timestamping for E823 devices
         - basic support for E830 devices
         - use aux-bus for managing client drivers
         - i40e: report firmware versions via devlink
      - nVidia/Mellanox:
         - support 4-port NICs
         - increase max number of channels to 256
         - optimize / parallelize SF creation flow
      - Broadcom (bnxt):
         - enhance NIC temperature reporting
         - support PAM4 speeds and lane configuration
      - Marvell OcteonTX2:
         - PTP pulse-per-second output support
         - enable hardware timestamping for VFs
      - Solarflare/AMD:
         - conntrack NAT offload and offload for tunnels
      - Wangxun (ngbe/txgbe):
         - expose HW statistics
      - Pensando/AMD:
         - support PCI level reset
         - narrow down the condition under which skbs are linearized
      - Netronome/Corigine (nfp):
         - support CHACHA20-POLY1305 crypto in IPsec offload

   - Ethernet NICs embedded, slower, virtual:
      - Synopsys (stmmac):
         - add Loongson-1 SoC support
         - enable use of HW queues with no offload capabilities
         - enable PPS input support on all 5 channels
         - increase TX coalesce timer to 5ms
      - RealTek USB (r8152): improve efficiency of Rx by using GRO frags
      - xen: support SW packet timestamping
      - add drivers for implementations based on TI's PRUSS (AM64x EVM)

   - nVidia/Mellanox Ethernet datacenter switches:
      - avoid poor HW resource use on Spectrum-4 by better block selection for IPv6 multicast forwarding and ordering of blocks in ACL region

   - Ethernet embedded switches:
      - Microchip:
         - support configuring the drive strength for EMI compliance
         - ksz9477: partial ACL support
         - ksz9477: HSR offload
         - ksz9477: Wake on LAN
      - Realtek:
         - rtl8366rb: respect device tree config of the CPU port

   - Ethernet PHYs:
      - support Broadcom BCM5221 PHYs
      - TI dp83867: support hardware LED blinking

   - CAN:
      - add support for Linux-PHY based CAN transceivers
      - at91_can: clean up and use rx-offload helpers

   - WiFi:
      - MediaTek (mt76):
         - new sub-driver for mt7925 USB/PCIe devices
         - HW wireless <> Ethernet bridging in MT7988 chips
         - mt7603/mt7628 stability improvements
      - Qualcomm (ath12k):
         - WCN7850:
            - enable 320 MHz channels in 6 GHz band
            - hardware rfkill support
            - enable IEEE80211_HW_SINGLE_SCAN_ON_ALL_BANDS to make scan faster
            - read board data variant name from SMBIOS
         - QCN9274: mesh support
      - RealTek (rtw89):
         - TDMA-based multi-channel concurrency (MCC)
      - Silicon Labs (wfx):
         - Remain-On-Channel (ROC) support

   - Bluetooth:
      - ISO: many improvements for broadcast support
      - mark BCM4378/BCM4387 as BROKEN_LE_CODED
      - add support for QCA2066
      - btmtksdio: enable Bluetooth wakeup from suspend"

* tag 'net-next-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1816 commits)
  net: pcs: xpcs: Add 2500BASE-X case in get state for XPCS drivers
  net: bpf: Use sockopt_lock_sock() in ip_sock_set_tos()
  net: mana: Use xdp_set_features_flag instead of direct assignment
  vxlan: Cleanup IFLA_VXLAN_PORT_RANGE entry in vxlan_get_size()
  iavf: delete the iavf client interface
  iavf: add a common function for undoing the interrupt scheme
  iavf: use unregister_netdev
  iavf: rely on netdev's own registered state
  iavf: fix the waiting time for initial reset
  iavf: in iavf_down, don't queue watchdog_task if comms failed
  iavf: simplify mutex_trylock+sleep loops
  iavf: fix comments about old bit locks
  doc/netlink: Update schema to support cmd-cnt-name and cmd-max-name
  tools: ynl: introduce option to process unknown attributes or types
  ipvlan: properly track tx_errors
  netdevsim: Block until all devices are released
  nfp: using napi_build_skb() to replace build_skb()
  net: dsa: microchip: ksz9477: Fix spelling mistake "Enery" -> "Energy"
  net: dsa: microchip: Ensure Stable PME Pin State for Wake-on-LAN
  net: dsa: microchip: Refactor switch shutdown routine for WoL preparation
  ...
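One of the items in the pull message above, "Support rcvlowat and notsent_lowat on MPTCP sockets", is exercised entirely from user space. A rough sketch of what that usage could look like — the IPPROTO_MPTCP fallback define and the chosen thresholds are illustrative, not taken from this merge:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* older libc headers may not define it */
#endif

int open_tuned_mptcp_socket(void)
{
	int rcv_lowat = 64 * 1024;	/* don't wake the reader for less than 64 KiB */
	int notsent_lowat = 128 * 1024;	/* cap not-yet-sent data queued in the kernel */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0)
		return -1;

	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &rcv_lowat, sizeof(rcv_lowat));
	setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &notsent_lowat, sizeof(notsent_lowat));
	return fd;
}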
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/8390/ax88796.c6
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c5
-rw-r--r--drivers/net/ethernet/8390/ne.c5
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c5
-rw-r--r--drivers/net/ethernet/altera/altera_tse.h2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c19
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c6
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.c50
-rw-r--r--drivers/net/ethernet/amd/pds_core/core.h7
-rw-r--r--drivers/net/ethernet/amd/pds_core/dev.c11
-rw-r--r--drivers/net/ethernet/amd/pds_core/devlink.c31
-rw-r--r--drivers/net/ethernet/amd/pds_core/main.c50
-rw-r--r--drivers/net/ethernet/amd/sunlance.c6
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-platform.c48
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c6
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c21
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.h3
-rw-r--r--drivers/net/ethernet/apple/macmace.c6
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c6
-rw-r--r--drivers/net/ethernet/arc/emac_rockchip.c5
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.c2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c80
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c2
-rw-r--r--drivers/net/ethernet/broadcom/asp2/bcmasp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c14
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c8
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c275
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c95
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c678
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h545
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c241
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c26
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c26
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c6
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c81
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c6
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c18
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c11
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/smt.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c43
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h36
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c5
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c8
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c5
-rw-r--r--drivers/net/ethernet/cortina/gemini.c12
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dnet.c6
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_hw.h2
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c121
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c5
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c5
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c83
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c11
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c18
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c10
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c12
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c17
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c116
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c161
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c5
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c217
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c8
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c10
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c6
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c5
-rw-r--r--drivers/net/ethernet/intel/Kconfig14
-rw-r--r--drivers/net/ethernet/intel/Makefile1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h212
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h24
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c69
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c31
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debug.h47
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.c236
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_devlink.h18
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_io.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c125
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx_common.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h62
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.h4
-rw-r--r--drivers/net/ethernet/intel/iavf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h46
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c578
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h169
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c244
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c46
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_type.h12
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c115
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h23
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h306
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c756
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h51
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c465
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h27
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.c2120
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dpll.h114
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch_br.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c228
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flow.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c135
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c96
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c679
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h41
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c758
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h95
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c56
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c307
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c29
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c22
-rw-r--r--drivers/net/ethernet/intel/idpf/Makefile18
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf.h968
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.c621
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq.h130
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_api.h169
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c171
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_dev.c165
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_devids.h10
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_ethtool.c1369
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h124
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h293
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h128
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c2379
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_main.c279
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_mem.h20
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c1183
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c4292
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.h1023
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_vf_dev.c163
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_virtchnl.c3798
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2.h1273
-rw-r--r--drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h451
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c55
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c2
-rw-r--r--drivers/net/ethernet/korina.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c6
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c6
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c6
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c11
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/mvneta_bm.c6
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c12
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c168
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_config.h22
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c24
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h18
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.c213
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_main.h13
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h4
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_rx.h3
-rw-r--r--drivers/net/ethernet/marvell/octeon_ep/octep_tx.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c86
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c53
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c464
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c62
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c58
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c5
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h19
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c12
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c1432
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h57
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_debugfs.c400
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_mcu.c150
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_regs.h369
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dpll.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c187
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c422
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c346
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c144
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c105
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c146
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c97
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c129
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c542
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c242
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/cmd.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/i2c.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c95
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c5
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c6
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ethtool.c3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c51
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h8
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c6
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c5
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c6
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c6
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c6
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c6
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/xsk.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c11
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c77
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c6
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c46
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c6
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c4
-rw-r--r--drivers/net/ethernet/renesas/Kconfig9
-rw-r--r--drivers/net/ethernet/renesas/Makefile4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c6
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c55
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c6
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c6
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c6
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c2
-rw-r--r--drivers/net/ethernet/sfc/mae.c62
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c3
-rw-r--r--drivers/net/ethernet/sfc/ptp.c27
-rw-r--r--drivers/net/ethernet/sfc/siena/efx_channels.c2
-rw-r--r--drivers/net/ethernet/sfc/tc.c337
-rw-r--r--drivers/net/ethernet/sfc/tc.h8
-rw-r--r--drivers/net/ethernet/sfc/tc_conntrack.c91
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c6
-rw-r--r--drivers/net/ethernet/sgi/meth.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c6
-rw-r--r--drivers/net/ethernet/socionext/netsec.c8
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c209
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c53
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c130
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c73
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c34
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sunqe.c6
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_driver.c6
-rw-r--r--drivers/net/ethernet/ti/Kconfig9
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/cpmac.c1251
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c40
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c6
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_config.c14
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.c49
-rw-r--r--drivers/net/ethernet/ti/icssg/icssg_prueth.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c5
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c4
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c10
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c6
-rw-r--r--drivers/net/ethernet/via/via-rhine.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c6
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c169
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c191
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h9
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c20
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h82
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c7
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c119
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c5
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c110
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h1
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c10
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c56
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h6
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c12
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c10
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c5
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c8
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c74
500 files changed, 35617 insertions, 8833 deletions
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index af603256b724..2874680ef24d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -811,7 +811,7 @@ static int ax_init_dev(struct net_device *dev)
return ret;
}
-static int ax_remove(struct platform_device *pdev)
+static void ax_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ei_device *ei_local = netdev_priv(dev);
@@ -832,8 +832,6 @@ static int ax_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_netdev(dev);
-
- return 0;
}
/*
@@ -1011,7 +1009,7 @@ static struct platform_driver axdrv = {
.name = "ax88796",
},
.probe = ax_probe,
- .remove = ax_remove,
+ .remove_new = ax_remove,
.suspend = ax_suspend,
.resume = ax_resume,
};
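The ax88796 hunk above is the first of many conversions in this diff from the int-returning .remove platform callback to the void-returning .remove_new; the same pattern repeats below for mcf8390, ne, owl-emac, greth, sun4i-emac, altera_tse, au1000, sunlance and others. A minimal sketch of the converted shape, using a made-up "foo" driver that is not part of this series:

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static void foo_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	/* no "return 0;": the callback type is now void, so any cleanup
	 * failure has to be handled (or at most logged) right here */
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-eth",
	},
	.remove_new = foo_remove,	/* was: .remove = foo_remove */
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");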
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 217838b28220..5a0fa995e643 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -441,7 +441,7 @@ static int mcf8390_probe(struct platform_device *pdev)
return 0;
}
-static int mcf8390_remove(struct platform_device *pdev)
+static void mcf8390_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct resource *mem;
@@ -450,7 +450,6 @@ static int mcf8390_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
- return 0;
}
static struct platform_driver mcf8390_drv = {
@@ -458,7 +457,7 @@ static struct platform_driver mcf8390_drv = {
.name = "mcf8390",
},
.probe = mcf8390_probe,
- .remove = mcf8390_remove,
+ .remove_new = mcf8390_remove,
};
module_platform_driver(mcf8390_drv);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 7d89ec1cf273..350683a09d2e 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -823,7 +823,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
return 0;
}
-static int ne_drv_remove(struct platform_device *pdev)
+static void ne_drv_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -842,7 +842,6 @@ static int ne_drv_remove(struct platform_device *pdev)
release_region(dev->base_addr, NE_IO_EXTENT);
free_netdev(dev);
}
- return 0;
}
/* Remove unused devices or all if true. */
@@ -895,7 +894,7 @@ static int ne_drv_resume(struct platform_device *pdev)
#endif
static struct platform_driver ne_driver = {
- .remove = ne_drv_remove,
+ .remove_new = ne_drv_remove,
.suspend = ne_drv_suspend,
.resume = ne_drv_resume,
.driver = {
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index c6f8f852bff1..e03193da5874 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1582,15 +1582,13 @@ static int owl_emac_probe(struct platform_device *pdev)
return 0;
}
-static int owl_emac_remove(struct platform_device *pdev)
+static void owl_emac_remove(struct platform_device *pdev)
{
struct owl_emac_priv *priv = platform_get_drvdata(pdev);
netif_napi_del(&priv->napi);
phy_disconnect(priv->netdev->phydev);
cancel_work_sync(&priv->mac_reset_task);
-
- return 0;
}
static const struct of_device_id owl_emac_of_match[] = {
@@ -1609,7 +1607,7 @@ static struct platform_driver owl_emac_driver = {
.pm = &owl_emac_pm_ops,
},
.probe = owl_emac_probe,
- .remove = owl_emac_remove,
+ .remove_new = owl_emac_remove,
};
module_platform_driver(owl_emac_driver);
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 597a02c75d52..27af7746d645 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1525,7 +1525,7 @@ error1:
return err;
}
-static int greth_of_remove(struct platform_device *of_dev)
+static void greth_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = platform_get_drvdata(of_dev);
struct greth_private *greth = netdev_priv(ndev);
@@ -1544,8 +1544,6 @@ static int greth_of_remove(struct platform_device *of_dev)
of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id greth_of_match[] = {
@@ -1566,7 +1564,7 @@ static struct platform_driver greth_of_driver = {
.of_match_table = greth_of_match,
},
.probe = greth_of_probe,
- .remove = greth_of_remove,
+ .remove_new = greth_of_remove,
};
module_platform_driver(greth_of_driver);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index a94c62956eed..d761c08fe5c1 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -1083,7 +1083,7 @@ out:
return ret;
}
-static int emac_remove(struct platform_device *pdev)
+static void emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_board_info *db = netdev_priv(ndev);
@@ -1101,7 +1101,6 @@ static int emac_remove(struct platform_device *pdev)
free_netdev(ndev);
dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
}
static int emac_suspend(struct platform_device *dev, pm_message_t state)
@@ -1143,7 +1142,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_of_match,
},
.probe = emac_probe,
- .remove = emac_remove,
+ .remove_new = emac_remove,
.suspend = emac_suspend,
.resume = emac_resume,
};
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index db5eed06e92d..82f2363a45cd 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -472,7 +472,7 @@ struct altera_tse_private {
/* ethtool msglvl option */
u32 msg_enable;
- struct altera_dmaops *dmaops;
+ const struct altera_dmaops *dmaops;
struct phylink *phylink;
struct phylink_config phylink_config;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 2e15800e5310..1c8763be0e4b 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -29,13 +29,13 @@
#include <linux/mii.h>
#include <linux/mdio/mdio-regmap.h>
#include <linux/netdevice.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
#include <linux/pcs-lynx.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <asm/cacheflush.h>
@@ -82,8 +82,6 @@ MODULE_PARM_DESC(dma_tx_num, "Number of descriptors in the TX list");
#define TXQUEUESTOP_THRESHHOLD 2
-static const struct of_device_id altera_tse_ids[];
-
static inline u32 tse_tx_avail(struct altera_tse_private *priv)
{
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
@@ -1133,7 +1131,6 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id = NULL;
struct regmap_config pcs_regmap_cfg;
struct altera_tse_private *priv;
struct mdio_regmap_config mrc;
@@ -1159,11 +1156,7 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->dev = ndev;
priv->msg_enable = netif_msg_init(debug, default_msg_level);
- of_id = of_match_device(altera_tse_ids, &pdev->dev);
-
- if (of_id)
- priv->dmaops = (struct altera_dmaops *)of_id->data;
-
+ priv->dmaops = device_get_match_data(&pdev->dev);
if (priv->dmaops &&
priv->dmaops->altera_dtype == ALTERA_DTYPE_SGDMA) {
@@ -1464,7 +1457,7 @@ err_free_netdev:
/* Remove Altera TSE MAC device
*/
-static int altera_tse_remove(struct platform_device *pdev)
+static void altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
@@ -1476,8 +1469,6 @@ static int altera_tse_remove(struct platform_device *pdev)
lynx_pcs_destroy(priv->pcs);
free_netdev(ndev);
-
- return 0;
}
static const struct altera_dmaops altera_dtype_sgdma = {
@@ -1528,7 +1519,7 @@ MODULE_DEVICE_TABLE(of, altera_tse_ids);
static struct platform_driver altera_tse_driver = {
.probe = altera_tse_probe,
- .remove = altera_tse_remove,
+ .remove_new = altera_tse_remove,
.suspend = NULL,
.resume = NULL,
.driver = {
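Besides the remove callback change, the altera_tse hunk above swaps an of_match_device() lookup against a forward-declared table for device_get_match_data(), which hands back the .data pointer of whichever OF/ACPI entry matched (the xgbe-platform change at the end of this diff does the same). A rough sketch of that lookup pattern with made-up names:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_dmaops { int dtype; };

static const struct foo_dmaops foo_sgdma_ops = { .dtype = 1 };

static const struct of_device_id foo_ids[] = {
	{ .compatible = "vendor,foo-sgdma", .data = &foo_sgdma_ops },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, foo_ids);

static int foo_probe(struct platform_device *pdev)
{
	/* returns foo_ids[].data for the matched entry, or NULL */
	const struct foo_dmaops *ops = device_get_match_data(&pdev->dev);

	if (!ops)
		return -ENODEV;
	/* ... configure DMA according to ops->dtype ... */
	return 0;
}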
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index f955bde10cf9..b5bca4814830 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1828,7 +1828,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
}
if (xdp_flags & ENA_XDP_REDIRECT)
- xdp_do_flush_map();
+ xdp_do_flush();
return work_done;
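xdp_do_flush_map() is an older alias for xdp_do_flush(); the ena hunk above simply moves to the current name. The driver-side pattern it belongs to, sketched with a hypothetical NAPI poll routine:

#include <linux/filter.h>
#include <linux/netdevice.h>

static int foo_napi_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	bool xdp_redirected = false;

	/*
	 * ... process up to `budget` RX descriptors; when the attached XDP
	 * program returns XDP_REDIRECT, set xdp_redirected = true ...
	 */

	if (xdp_redirected)
		xdp_do_flush();	/* flush per-CPU redirect state once per poll */

	return work_done;
}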
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index c5cec4e79489..85c978149bf6 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1323,7 +1323,7 @@ out:
return err;
}
-static int au1000_remove(struct platform_device *pdev)
+static void au1000_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct au1000_private *aup = netdev_priv(dev);
@@ -1359,13 +1359,11 @@ static int au1000_remove(struct platform_device *pdev)
release_mem_region(macen->start, resource_size(macen));
free_netdev(dev);
-
- return 0;
}
static struct platform_driver au1000_eth_driver = {
.probe = au1000_probe,
- .remove = au1000_remove,
+ .remove_new = au1000_remove,
.driver = {
.name = "au1000-eth",
},
diff --git a/drivers/net/ethernet/amd/pds_core/core.c b/drivers/net/ethernet/amd/pds_core/core.c
index 36f9b932b9e2..0d2091e9eb28 100644
--- a/drivers/net/ethernet/amd/pds_core/core.c
+++ b/drivers/net/ethernet/amd/pds_core/core.c
@@ -152,11 +152,8 @@ void pdsc_qcq_free(struct pdsc *pdsc, struct pdsc_qcq *qcq)
dma_free_coherent(dev, qcq->cq_size,
qcq->cq_base, qcq->cq_base_pa);
- if (qcq->cq.info)
- vfree(qcq->cq.info);
-
- if (qcq->q.info)
- vfree(qcq->q.info);
+ vfree(qcq->cq.info);
+ vfree(qcq->q.info);
memset(qcq, 0, sizeof(*qcq));
}
@@ -445,12 +442,13 @@ int pdsc_setup(struct pdsc *pdsc, bool init)
goto err_out_teardown;
/* Set up the VIFs */
- err = pdsc_viftypes_init(pdsc);
- if (err)
- goto err_out_teardown;
+ if (init) {
+ err = pdsc_viftypes_init(pdsc);
+ if (err)
+ goto err_out_teardown;
- if (init)
pdsc_debugfs_add_viftype(pdsc);
+ }
clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
return 0;
@@ -469,8 +467,10 @@ void pdsc_teardown(struct pdsc *pdsc, bool removing)
pdsc_qcq_free(pdsc, &pdsc->notifyqcq);
pdsc_qcq_free(pdsc, &pdsc->adminqcq);
- kfree(pdsc->viftype_status);
- pdsc->viftype_status = NULL;
+ if (removing) {
+ kfree(pdsc->viftype_status);
+ pdsc->viftype_status = NULL;
+ }
if (pdsc->intr_info) {
for (i = 0; i < pdsc->nintrs; i++)
@@ -512,7 +512,7 @@ void pdsc_stop(struct pdsc *pdsc)
PDS_CORE_INTR_MASK_SET);
}
-static void pdsc_fw_down(struct pdsc *pdsc)
+void pdsc_fw_down(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
@@ -520,10 +520,13 @@ static void pdsc_fw_down(struct pdsc *pdsc)
};
if (test_and_set_bit(PDSC_S_FW_DEAD, &pdsc->state)) {
- dev_err(pdsc->dev, "%s: already happening\n", __func__);
+ dev_warn(pdsc->dev, "%s: already happening\n", __func__);
return;
}
+ if (pdsc->pdev->is_virtfn)
+ return;
+
/* Notify clients of fw_down */
if (pdsc->fw_reporter)
devlink_health_report(pdsc->fw_reporter, "FW down reported", pdsc);
@@ -533,7 +536,7 @@ static void pdsc_fw_down(struct pdsc *pdsc)
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
-static void pdsc_fw_up(struct pdsc *pdsc)
+void pdsc_fw_up(struct pdsc *pdsc)
{
union pds_core_notifyq_comp reset_event = {
.reset.ecode = cpu_to_le16(PDS_EVENT_RESET),
@@ -546,6 +549,11 @@ static void pdsc_fw_up(struct pdsc *pdsc)
return;
}
+ if (pdsc->pdev->is_virtfn) {
+ clear_bit(PDSC_S_FW_DEAD, &pdsc->state);
+ return;
+ }
+
err = pdsc_setup(pdsc, PDSC_SETUP_RECOVERY);
if (err)
goto err_out;
@@ -567,6 +575,18 @@ err_out:
pdsc_teardown(pdsc, PDSC_TEARDOWN_RECOVERY);
}
+static void pdsc_check_pci_health(struct pdsc *pdsc)
+{
+ u8 fw_status = ioread8(&pdsc->info_regs->fw_status);
+
+ /* is PCI broken? */
+ if (fw_status != PDS_RC_BAD_PCI)
+ return;
+
+ pdsc_reset_prepare(pdsc->pdev);
+ pdsc_reset_done(pdsc->pdev);
+}
+
void pdsc_health_thread(struct work_struct *work)
{
struct pdsc *pdsc = container_of(work, struct pdsc, health_work);
@@ -593,6 +613,8 @@ void pdsc_health_thread(struct work_struct *work)
pdsc_fw_down(pdsc);
}
+ pdsc_check_pci_health(pdsc);
+
pdsc->fw_generation = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
out_unlock:
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index e545fafc4819..f3a7deda9972 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -283,6 +283,9 @@ int pdsc_devcmd_reset(struct pdsc *pdsc);
int pdsc_dev_reinit(struct pdsc *pdsc);
int pdsc_dev_init(struct pdsc *pdsc);
+void pdsc_reset_prepare(struct pci_dev *pdev);
+void pdsc_reset_done(struct pci_dev *pdev);
+
int pdsc_intr_alloc(struct pdsc *pdsc, char *name,
irq_handler_t handler, void *data);
void pdsc_intr_free(struct pdsc *pdsc, int index);
@@ -309,4 +312,8 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data);
int pdsc_firmware_update(struct pdsc *pdsc, const struct firmware *fw,
struct netlink_ext_ack *extack);
+
+void pdsc_fw_down(struct pdsc *pdsc);
+void pdsc_fw_up(struct pdsc *pdsc);
+
#endif /* _PDSC_H_ */
diff --git a/drivers/net/ethernet/amd/pds_core/dev.c b/drivers/net/ethernet/amd/pds_core/dev.c
index f77cd9f5a2fd..7c1b965d61a9 100644
--- a/drivers/net/ethernet/amd/pds_core/dev.c
+++ b/drivers/net/ethernet/amd/pds_core/dev.c
@@ -42,6 +42,8 @@ int pdsc_err_to_errno(enum pds_core_status_code code)
return -ERANGE;
case PDS_RC_BAD_ADDR:
return -EFAULT;
+ case PDS_RC_BAD_PCI:
+ return -ENXIO;
case PDS_RC_EOPCODE:
case PDS_RC_EINTR:
case PDS_RC_DEV_CMD:
@@ -62,7 +64,7 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
/* Firmware is useful only if the running bit is set and
* fw_status != 0xff (bad PCI read)
*/
- return (pdsc->fw_status != 0xff) &&
+ return (pdsc->fw_status != PDS_RC_BAD_PCI) &&
(pdsc->fw_status & PDS_CORE_FW_STS_F_RUNNING);
}
@@ -128,6 +130,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
unsigned long max_wait;
unsigned long duration;
int timeout = 0;
+ bool running;
int done = 0;
int err = 0;
int status;
@@ -136,6 +139,10 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
max_wait = start_time + (max_seconds * HZ);
while (!done && !timeout) {
+ running = pdsc_is_fw_running(pdsc);
+ if (!running)
+ break;
+
done = pdsc_devcmd_done(pdsc);
if (done)
break;
@@ -152,7 +159,7 @@ static int pdsc_devcmd_wait(struct pdsc *pdsc, u8 opcode, int max_seconds)
dev_dbg(dev, "DEVCMD %d %s after %ld secs\n",
opcode, pdsc_devcmd_str(opcode), duration / HZ);
- if (!done || timeout) {
+ if ((!done || timeout) && running) {
dev_err(dev, "DEVCMD %d %s timeout, done %d timeout %d max_seconds=%d\n",
opcode, pdsc_devcmd_str(opcode), done, timeout,
max_seconds);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index d9607033bbf2..57f88c8b37de 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -124,6 +124,8 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
snprintf(buf, sizeof(buf), "fw.slot_%d", i);
err = devlink_info_version_stored_put(req, buf,
fw_list.fw_names[i].fw_version);
+ if (err)
+ return err;
}
err = devlink_info_version_running_put(req,
@@ -154,33 +156,20 @@ int pdsc_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_health_reporter_priv(reporter);
- int err;
mutex_lock(&pdsc->config_lock);
-
if (test_bit(PDSC_S_FW_DEAD, &pdsc->state))
- err = devlink_fmsg_string_pair_put(fmsg, "Status", "dead");
+ devlink_fmsg_string_pair_put(fmsg, "Status", "dead");
else if (!pdsc_is_fw_good(pdsc))
- err = devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy");
+ devlink_fmsg_string_pair_put(fmsg, "Status", "unhealthy");
else
- err = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
-
+ devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
mutex_unlock(&pdsc->config_lock);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "State",
- pdsc->fw_status &
- ~PDS_CORE_FW_STS_F_GENERATION);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "Generation",
- pdsc->fw_generation >> 4);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "State",
+ pdsc->fw_status & ~PDS_CORE_FW_STS_F_GENERATION);
+ devlink_fmsg_u32_pair_put(fmsg, "Generation", pdsc->fw_generation >> 4);
+ devlink_fmsg_u32_pair_put(fmsg, "Recoveries", pdsc->fw_recoveries);
- return devlink_fmsg_u32_pair_put(fmsg, "Recoveries",
- pdsc->fw_recoveries);
+ return 0;
}
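The devlink.c hunk above drops the per-call error checks because, as the pull message puts it, error handling when composing devlink health messages was simplified: the devlink_fmsg_*_pair_put() helpers no longer return a value the caller has to propagate. A stripped-down diagnose callback in that style, with made-up driver fields:

#include <net/devlink.h>

struct foo_dev {
	bool fw_dead;
	u32 fw_recoveries;
};

static int foo_fw_diagnose(struct devlink_health_reporter *reporter,
			   struct devlink_fmsg *fmsg,
			   struct netlink_ext_ack *extack)
{
	struct foo_dev *fdev = devlink_health_reporter_priv(reporter);

	/* no error to check or forward from the pair_put helpers */
	devlink_fmsg_string_pair_put(fmsg, "Status",
				     fdev->fw_dead ? "dead" : "healthy");
	devlink_fmsg_u32_pair_put(fmsg, "Recoveries", fdev->fw_recoveries);

	return 0;
}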
diff --git a/drivers/net/ethernet/amd/pds_core/main.c b/drivers/net/ethernet/amd/pds_core/main.c
index 3a45bf474a19..3080898d7b95 100644
--- a/drivers/net/ethernet/amd/pds_core/main.c
+++ b/drivers/net/ethernet/amd/pds_core/main.c
@@ -445,12 +445,62 @@ static void pdsc_remove(struct pci_dev *pdev)
devlink_free(dl);
}
+void pdsc_reset_prepare(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+
+ pdsc_fw_down(pdsc);
+
+ pci_free_irq_vectors(pdev);
+ pdsc_unmap_bars(pdsc);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+void pdsc_reset_done(struct pci_dev *pdev)
+{
+ struct pdsc *pdsc = pci_get_drvdata(pdev);
+ struct device *dev = pdsc->dev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Cannot enable PCI device: %pe\n", ERR_PTR(err));
+ return;
+ }
+ pci_set_master(pdev);
+
+ if (!pdev->is_virtfn) {
+ pcie_print_link_status(pdsc->pdev);
+
+ err = pci_request_regions(pdsc->pdev, PDS_CORE_DRV_NAME);
+ if (err) {
+ dev_err(pdsc->dev, "Cannot request PCI regions: %pe\n",
+ ERR_PTR(err));
+ return;
+ }
+
+ err = pdsc_map_bars(pdsc);
+ if (err)
+ return;
+ }
+
+ pdsc_fw_up(pdsc);
+}
+
+static const struct pci_error_handlers pdsc_err_handler = {
+ /* FLR handling */
+ .reset_prepare = pdsc_reset_prepare,
+ .reset_done = pdsc_reset_done,
+};
+
static struct pci_driver pdsc_driver = {
.name = PDS_CORE_DRV_NAME,
.id_table = pdsc_id_table,
.probe = pdsc_probe,
.remove = pdsc_remove,
.sriov_configure = pdsc_sriov_configure,
+ .err_handler = &pdsc_err_handler,
};
void *pdsc_get_pf_struct(struct pci_dev *vf_pdev)
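The main.c hunk above hooks the new pdsc_reset_prepare()/pdsc_reset_done() pair into the PCI core via pci_error_handlers, which is how the "support PCI level reset" item in the pull message lands for pds_core: the PCI core calls .reset_prepare before a function-level reset and .reset_done afterwards. A bare-bones sketch of that hookup with placeholder callbacks:

#include <linux/pci.h>

static void foo_reset_prepare(struct pci_dev *pdev)
{
	/* quiesce: stop I/O, free IRQ vectors, unmap BARs, disable the device */
}

static void foo_reset_done(struct pci_dev *pdev)
{
	/* re-enable the device, remap BARs, restart firmware and queues */
}

static const struct pci_error_handlers foo_err_handler = {
	.reset_prepare	= foo_reset_prepare,
	.reset_done	= foo_reset_done,
};

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.err_handler	= &foo_err_handler,
	/* .id_table / .probe / .remove omitted in this sketch */
};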
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 33bb539ad70a..c78706d21a6a 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1487,7 +1487,7 @@ static int sunlance_sbus_probe(struct platform_device *op)
return err;
}
-static int sunlance_sbus_remove(struct platform_device *op)
+static void sunlance_sbus_remove(struct platform_device *op)
{
struct lance_private *lp = platform_get_drvdata(op);
struct net_device *net_dev = lp->dev;
@@ -1497,8 +1497,6 @@ static int sunlance_sbus_remove(struct platform_device *op)
lance_free_hwresources(lp);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id sunlance_sbus_match[] = {
@@ -1516,7 +1514,7 @@ static struct platform_driver sunlance_sbus_driver = {
.of_match_table = sunlance_sbus_match,
},
.probe = sunlance_sbus_probe,
- .remove = sunlance_sbus_remove,
+ .remove_new = sunlance_sbus_remove,
};
module_platform_driver(sunlance_sbus_driver);
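This hunk, like many that follow, converts a platform driver from .remove (which returned an int the platform core could not meaningfully act on) to .remove_new (which returns void), so the callback simply loses its trailing "return 0;". A minimal sketch of the converted shape, using placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        /* allocate resources, register the netdev, etc. */
        return 0;
}

static void foo_remove(struct platform_device *pdev)
{
        /* tear down in reverse order; nothing to return */
}

static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .remove_new = foo_remove,       /* void-returning remove callback */
        .driver = {
                .name = "foo",
        },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");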
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 4d790a89fe77..9131020d06af 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -123,9 +123,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_address.h>
#include <linux/of_platform.h>
-#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/property.h>
#include <linux/acpi.h>
@@ -135,17 +133,6 @@
#include "xgbe-common.h"
#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgbe_acpi_match[];
-
-static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(xgbe_acpi_match, pdata->dev);
-
- return id ? (struct xgbe_version_data *)id->driver_data : NULL;
-}
-
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
@@ -173,11 +160,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
return 0;
}
#else /* CONFIG_ACPI */
-static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
-{
- return NULL;
-}
-
static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
@@ -185,17 +167,6 @@ static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
#endif /* CONFIG_ACPI */
#ifdef CONFIG_OF
-static const struct of_device_id xgbe_of_match[];
-
-static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
-{
- const struct of_device_id *id;
-
- id = of_match_device(xgbe_of_match, pdata->dev);
-
- return id ? (struct xgbe_version_data *)id->data : NULL;
-}
-
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
struct device *dev = pdata->dev;
@@ -244,11 +215,6 @@ static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
return phy_pdev;
}
#else /* CONFIG_OF */
-static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
-{
- return NULL;
-}
-
static int xgbe_of_support(struct xgbe_prv_data *pdata)
{
return -EINVAL;
@@ -290,12 +256,6 @@ static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
return phy_pdev;
}
-static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
-{
- return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
- : xgbe_of_vdata(pdata);
-}
-
static int xgbe_platform_probe(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata;
@@ -321,7 +281,7 @@ static int xgbe_platform_probe(struct platform_device *pdev)
pdata->use_acpi = dev->of_node ? 0 : 1;
/* Get the version data */
- pdata->vdata = xgbe_get_vdata(pdata);
+ pdata->vdata = (struct xgbe_version_data *)device_get_match_data(dev);
phy_pdev = xgbe_get_phy_pdev(pdata);
if (!phy_pdev) {
@@ -512,7 +472,7 @@ err_alloc:
return ret;
}
-static int xgbe_platform_remove(struct platform_device *pdev)
+static void xgbe_platform_remove(struct platform_device *pdev)
{
struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
@@ -521,8 +481,6 @@ static int xgbe_platform_remove(struct platform_device *pdev)
platform_device_put(pdata->phy_platdev);
xgbe_free_pdata(pdata);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -615,7 +573,7 @@ static struct platform_driver xgbe_driver = {
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
- .remove = xgbe_platform_remove,
+ .remove_new = xgbe_platform_remove,
};
int xgbe_platform_init(void)
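The xgbe probe path above (and the xgene one below) replaces the separate of_match_device()/acpi_match_device() lookups with a single device_get_match_data() call, which resolves the match data regardless of whether the device was enumerated via OF or ACPI. A hedged sketch of the pattern; the driver, compatible string, and version-data struct are placeholders:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>

struct foo_version_data {
        unsigned int features;
};

static const struct foo_version_data foo_v2_data = { .features = 2 };

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo-v2", .data = &foo_v2_data },
        { }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
        const struct foo_version_data *vdata;

        /* Works for both OF and ACPI enumerated devices. */
        vdata = device_get_match_data(&pdev->dev);
        if (!vdata)
                return -ENODEV;
        /* ... use vdata->features ... */
        return 0;
}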
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 379d19d18dbe..9e90c2381491 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -690,7 +690,7 @@ err:
return ret;
}
-static int xge_remove(struct platform_device *pdev)
+static void xge_remove(struct platform_device *pdev)
{
struct xge_pdata *pdata;
struct net_device *ndev;
@@ -706,8 +706,6 @@ static int xge_remove(struct platform_device *pdev)
xge_mdio_remove(ndev);
unregister_netdev(ndev);
free_netdev(ndev);
-
- return 0;
}
static void xge_shutdown(struct platform_device *pdev)
@@ -736,7 +734,7 @@ static struct platform_driver xge_driver = {
.acpi_match_table = ACPI_PTR(xge_acpi_match),
},
.probe = xge_probe,
- .remove = xge_remove,
+ .remove_new = xge_remove,
.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index f3305c434c95..44900026d11b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -2018,7 +2018,6 @@ static int xgene_enet_probe(struct platform_device *pdev)
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
void (*link_state)(struct work_struct *);
- const struct of_device_id *of_id;
int ret;
ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
@@ -2039,19 +2038,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
NETIF_F_GRO |
NETIF_F_SG;
- of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
- if (of_id) {
- pdata->enet_id = (uintptr_t)of_id->data;
- }
-#ifdef CONFIG_ACPI
- else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
- if (acpi_id)
- pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
- }
-#endif
+ pdata->enet_id = (enum xgene_enet_id)device_get_match_data(&pdev->dev);
if (!pdata->enet_id) {
ret = -ENODEV;
goto err;
@@ -2127,7 +2114,7 @@ err:
return ret;
}
-static int xgene_enet_remove(struct platform_device *pdev)
+static void xgene_enet_remove(struct platform_device *pdev)
{
struct xgene_enet_pdata *pdata;
struct net_device *ndev;
@@ -2149,8 +2136,6 @@ static int xgene_enet_remove(struct platform_device *pdev)
xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
free_netdev(ndev);
-
- return 0;
}
static void xgene_enet_shutdown(struct platform_device *pdev)
@@ -2174,7 +2159,7 @@ static struct platform_driver xgene_enet_driver = {
.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
},
.probe = xgene_enet_probe,
- .remove = xgene_enet_remove,
+ .remove_new = xgene_enet_remove,
.shutdown = xgene_enet_shutdown,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 643f5e646740..bce2c19e3f22 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -15,9 +15,10 @@
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/io.h>
-#include <linux/of_platform.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 8775c3234e91..766ab78256fe 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -739,7 +739,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");
-static int mac_mace_device_remove(struct platform_device *pdev)
+static void mac_mace_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mace_data *mp = netdev_priv(dev);
@@ -755,13 +755,11 @@ static int mac_mace_device_remove(struct platform_device *pdev)
mp->tx_ring, mp->tx_ring_phys);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver mac_mace_driver = {
.probe = mace_probe,
- .remove = mac_mace_device_remove,
+ .remove_new = mac_mace_device_remove,
.driver = {
.name = mac_mace_string,
},
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index ce3147e886a1..a3afddb23ee8 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -58,14 +58,12 @@ out_netdev:
return err;
}
-static int emac_arc_remove(struct platform_device *pdev)
+static void emac_arc_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
arc_emac_remove(ndev);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id emac_arc_dt_ids[] = {
@@ -76,7 +74,7 @@ MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
static struct platform_driver emac_arc_driver = {
.probe = emac_arc_probe,
- .remove = emac_arc_remove,
+ .remove_new = emac_arc_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = emac_arc_dt_ids,
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 509101112279..493d6356c8ca 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -244,7 +244,7 @@ out_netdev:
return err;
}
-static int emac_rockchip_remove(struct platform_device *pdev)
+static void emac_rockchip_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct rockchip_priv_data *priv = netdev_priv(ndev);
@@ -260,12 +260,11 @@ static int emac_rockchip_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->macclk);
free_netdev(ndev);
- return 0;
}
static struct platform_driver emac_rockchip_driver = {
.probe = emac_rockchip_probe,
- .remove = emac_rockchip_remove,
+ .remove_new = emac_rockchip_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = emac_rockchip_dt_ids,
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c
index 916ae380a004..7d2fe2e5af92 100644
--- a/drivers/net/ethernet/asix/ax88796c_ioctl.c
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c
@@ -24,7 +24,7 @@ static void
ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
/* Inherit standard device info */
- strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static u32 ax88796c_get_msglevel(struct net_device *ndev)
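Several drivers in this series (ax88796c above, atl2 and bcm63xx_enet below) switch from strncpy() to strscpy() for fixed-size string fields: unlike strncpy(), strscpy() always NUL-terminates the destination and signals truncation instead of silently leaving an unterminated buffer. A small userspace-runnable illustration, using a stand-in copy_trunc() helper to mimic strscpy() semantics (strscpy itself is a kernel API, so the helper here is purely illustrative):

#include <stdio.h>
#include <string.h>

/* Stand-in with strscpy-like behaviour: always NUL-terminates and
 * returns -1 when the source did not fit (the kernel returns -E2BIG).
 */
static long copy_trunc(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (!size)
                return -1;
        if (len >= size) {
                memcpy(dst, src, size - 1);
                dst[size - 1] = '\0';
                return -1;
        }
        memcpy(dst, src, len + 1);
        return (long)len;
}

int main(void)
{
        char buf[8];

        /* Guaranteed to be a valid, terminated C string even on truncation. */
        copy_trunc(buf, "0000:00:1f.6", sizeof(buf));
        printf("copied (truncated): \"%s\"\n", buf);
        return 0;
}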
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 009e0b3066fa..0f2f400b5bc4 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1968,21 +1968,19 @@ err_put_clk:
return err;
}
-static int ag71xx_remove(struct platform_device *pdev)
+static void ag71xx_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ag71xx *ag;
if (!ndev)
- return 0;
+ return;
ag = netdev_priv(ndev);
unregister_netdev(ndev);
ag71xx_mdio_remove(ag);
clk_disable_unprepare(ag->clk_eth);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static const u32 ar71xx_fifo_ar7100[] = {
@@ -2069,7 +2067,7 @@ static const struct of_device_id ag71xx_match[] = {
static struct platform_driver ag71xx_driver = {
.probe = ag71xx_probe,
- .remove = ag71xx_remove,
+ .remove_new = ag71xx_remove,
.driver = {
.name = "ag71xx",
.of_match_table = ag71xx_match,
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 43d821fe7a54..63ba64dbb731 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -504,15 +504,12 @@ struct atl1c_rrd_ring {
u16 next_to_use;
u16 next_to_clean;
struct napi_struct napi;
- struct page *rx_page;
- unsigned int rx_page_offset;
};
/* board specific private data structure */
struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 940c5d1ff9cf..46cdc32b4e31 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -483,15 +483,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
struct net_device *dev)
{
- unsigned int head_size;
int mtu = dev->mtu;
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
-
- head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- adapter->rx_frag_size = roundup_pow_of_two(head_size);
}
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -847,7 +842,8 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
}
static inline void atl1c_clean_buffer(struct pci_dev *pdev,
- struct atl1c_buffer *buffer_info)
+ struct atl1c_buffer *buffer_info,
+ int budget)
{
u16 pci_driection;
if (buffer_info->flags & ATL1C_BUFFER_FREE)
@@ -866,7 +862,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->length, pci_driection);
}
if (buffer_info->skb)
- dev_consume_skb_any(buffer_info->skb);
+ napi_consume_skb(buffer_info->skb, budget);
buffer_info->dma = 0;
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
@@ -887,7 +883,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
ring_count = tpd_ring->count;
for (index = 0; index < ring_count; index++) {
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, 0);
}
netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
@@ -914,7 +910,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
for (j = 0; j < rfd_ring->count; j++) {
buffer_info = &rfd_ring->buffer_info[j];
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, 0);
}
/* zero out the descriptor ring */
memset(rfd_ring->desc, 0, rfd_ring->size);
@@ -964,7 +960,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- int i;
dma_free_coherent(&pdev->dev, adapter->ring_header.size,
adapter->ring_header.desc, adapter->ring_header.dma);
@@ -977,12 +972,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
- for (i = 0; i < adapter->rx_queue_count; ++i) {
- if (adapter->rrd_ring[i].rx_page) {
- put_page(adapter->rrd_ring[i].rx_page);
- adapter->rrd_ring[i].rx_page = NULL;
- }
- }
}
/**
@@ -1619,7 +1608,7 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
total_bytes += buffer_info->skb->len;
total_packets++;
}
- atl1c_clean_buffer(pdev, buffer_info);
+ atl1c_clean_buffer(pdev, buffer_info, budget);
if (++next_to_clean == tpd_ring->count)
next_to_clean = 0;
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
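atl1c_clean_buffer() now takes the caller's NAPI budget and frees skbs with napi_consume_skb(): a non-zero budget (the NAPI poll path above) lets completed skbs be recycled cheaply in bulk, while a budget of 0 (ring teardown and the rollback path further down) tells the helper it is not running in NAPI context, where napi_consume_skb() falls back to dev_consume_skb_any(). A brief sketch of the calling convention with a placeholder helper name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative cleanup helper: "budget" is the caller's NAPI budget,
 * or 0 when invoked outside NAPI (ring teardown, error rollback).
 */
static void foo_clean_tx_buffer(struct sk_buff *skb, int budget)
{
        if (!skb)
                return;
        /* budget != 0: NAPI context, skb may be bulk-recycled;
         * budget == 0: behaves like dev_consume_skb_any().
         */
        napi_consume_skb(skb, budget);
}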
@@ -1754,48 +1743,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
skb_checksum_none_assert(skb);
}
-static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
- u32 queue, bool napi_mode)
-{
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
- struct sk_buff *skb;
- struct page *page;
-
- if (adapter->rx_frag_size > PAGE_SIZE) {
- if (likely(napi_mode))
- return napi_alloc_skb(&rrd_ring->napi,
- adapter->rx_buffer_len);
- else
- return netdev_alloc_skb_ip_align(adapter->netdev,
- adapter->rx_buffer_len);
- }
-
- page = rrd_ring->rx_page;
- if (!page) {
- page = alloc_page(GFP_ATOMIC);
- if (unlikely(!page))
- return NULL;
- rrd_ring->rx_page = page;
- rrd_ring->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
- adapter->rx_frag_size);
- if (likely(skb)) {
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- rrd_ring->rx_page_offset += adapter->rx_frag_size;
- if (rrd_ring->rx_page_offset >= PAGE_SIZE)
- rrd_ring->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
bool napi_mode)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct pci_dev *pdev = adapter->pdev;
struct atl1c_buffer *buffer_info, *next_info;
struct sk_buff *skb;
@@ -1814,13 +1766,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = atl1c_alloc_skb(adapter, queue, napi_mode);
+ /* When the DMA RX address is set to something like
+ * 0x....fc0, it is very likely to cause a DMA
+ * RFD overflow issue.
+ *
+ * To work around it, we allocate the rx skb with 64
+ * bytes of extra space, and offset the address whenever
+ * 0x....fc0 is detected.
+ */
+ if (likely(napi_mode))
+ skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64);
+ else
+ skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
break;
}
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
/*
* Make buffer alignment 2 beyond a 16 byte boundary
* this will result in a 16 byte aligned IP header after
@@ -2186,7 +2152,7 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
while (index != tpd_ring->next_to_use) {
tpd = ATL1C_TPD_DESC(tpd_ring, index);
buffer_info = &tpd_ring->buffer_info[index];
- atl1c_clean_buffer(adpt->pdev, buffer_info);
+ atl1c_clean_buffer(adpt->pdev, buffer_info, 0);
memset(tpd, 0, sizeof(struct atl1c_tpd_desc));
if (++index == tpd_ring->count)
index = 0;
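The receive-buffer hunk above allocates each rx skb 64 bytes larger and, when the data pointer's low 12 bits land on 0xfc0 (64 bytes before a 4 KiB boundary), reserves 64 bytes so the DMA address moves past the problematic offset. The check itself is plain pointer arithmetic; a small userspace sketch of the same test, with made-up buffer addresses:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the check in the hunk above: if the buffer start sits at
 * offset 0xfc0 within its 4 KiB page, skip forward 64 bytes so the
 * DMA address no longer ends in ...fc0.
 */
static uintptr_t fixup_rx_addr(uintptr_t addr)
{
        if ((addr & 0xfff) == 0xfc0)
                addr += 64;     /* the skb was allocated 64 bytes larger */
        return addr;
}

int main(void)
{
        uintptr_t samples[] = { 0x1000fc0, 0x10012c0 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("%#lx -> %#lx\n", (unsigned long)samples[i],
                       (unsigned long)fixup_rx_addr(samples[i]));
        return 0;
}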
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 02aa6fd8ebc2..a9014d7932db 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2446,7 +2446,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget)
static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
{
- if (!napi_schedule_prep(&adapter->napi))
+ if (!napi_schedule(&adapter->napi))
/* It is possible in case even the RX/TX ints are disabled via IMR
* register the ISR bits are set anyway (but do not produce IRQ).
* To handle such situation the napi functions used to check is
@@ -2454,8 +2454,6 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
*/
return 0;
- __napi_schedule(&adapter->napi);
-
/*
* Disable RX/TX ints via IMR register if it is
* allowed. NAPI handler must reenable them in same
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1b487c071cb6..bcfc9488125b 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1377,7 +1377,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
netdev->min_mtu = 40;
netdev->max_mtu = ETH_DATA_LEN + VLAN_HLEN;
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index 41a6098eb0c2..29b04a274d07 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1344,17 +1344,15 @@ of_put_exit:
return ret;
}
-static int bcmasp_remove(struct platform_device *pdev)
+static void bcmasp_remove(struct platform_device *pdev)
{
struct bcmasp_priv *priv = dev_get_drvdata(&pdev->dev);
if (!priv)
- return 0;
+ return;
priv->destroy_wol(priv);
bcmasp_remove_intfs(priv);
-
- return 0;
}
static void bcmasp_shutdown(struct platform_device *pdev)
@@ -1428,7 +1426,7 @@ static SIMPLE_DEV_PM_OPS(bcmasp_pm_ops,
static struct platform_driver bcmasp_driver = {
.probe = bcmasp_probe,
- .remove = bcmasp_remove,
+ .remove_new = bcmasp_remove,
.shutdown = bcmasp_shutdown,
.driver = {
.name = "brcm,asp-v2",
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 33d86683af50..3e7c8671cd11 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -768,7 +768,7 @@ err_dma_free:
return err;
}
-static int bcm4908_enet_remove(struct platform_device *pdev)
+static void bcm4908_enet_remove(struct platform_device *pdev)
{
struct bcm4908_enet *enet = platform_get_drvdata(pdev);
@@ -776,8 +776,6 @@ static int bcm4908_enet_remove(struct platform_device *pdev)
netif_napi_del(&enet->rx_ring.napi);
netif_napi_del(&enet->tx_ring.napi);
bcm4908_enet_dma_free(enet);
-
- return 0;
}
static const struct of_device_id bcm4908_enet_of_match[] = {
@@ -791,7 +789,7 @@ static struct platform_driver bcm4908_enet_driver = {
.of_match_table = bcm4908_enet_of_match,
},
.probe = bcm4908_enet_probe,
- .remove = bcm4908_enet_remove,
+ .remove_new = bcm4908_enet_remove,
};
module_platform_driver(bcm4908_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a741070f1f9a..3196c4dea076 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1902,7 +1902,7 @@ out:
/*
* exit func, stops hardware and unregisters netdevice
*/
-static int bcm_enet_remove(struct platform_device *pdev)
+static void bcm_enet_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -1932,12 +1932,11 @@ static int bcm_enet_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
- return 0;
}
static struct platform_driver bcm63xx_enet_driver = {
.probe = bcm_enet_probe,
- .remove = bcm_enet_remove,
+ .remove_new = bcm_enet_remove,
.driver = {
.name = "bcm63xx_enet",
},
@@ -2531,8 +2530,8 @@ static int bcm_enetsw_get_sset_count(struct net_device *netdev,
static void bcm_enetsw_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strncpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static void bcm_enetsw_get_ethtool_stats(struct net_device *netdev,
@@ -2739,7 +2738,7 @@ out:
/* exit func, stops hardware and unregisters netdevice */
-static int bcm_enetsw_remove(struct platform_device *pdev)
+static void bcm_enetsw_remove(struct platform_device *pdev)
{
struct bcm_enet_priv *priv;
struct net_device *dev;
@@ -2752,12 +2751,11 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->mac_clk);
free_netdev(dev);
- return 0;
}
static struct platform_driver bcm63xx_enetsw_driver = {
.probe = bcm_enetsw_probe,
- .remove = bcm_enetsw_remove,
+ .remove_new = bcm_enetsw_remove,
.driver = {
.name = "bcm63xx_enetsw",
},
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index bf1611cce974..c9faa8540859 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2430,7 +2430,7 @@ static int bcm_sysport_netdevice_event(struct notifier_block *nb,
if (dev->netdev_ops != &bcm_sysport_netdev_ops)
return NOTIFY_DONE;
- if (!dsa_slave_dev_check(info->upper_dev))
+ if (!dsa_user_dev_check(info->upper_dev))
return NOTIFY_DONE;
if (info->linking)
@@ -2648,7 +2648,7 @@ err_free_netdev:
return ret;
}
-static int bcm_sysport_remove(struct platform_device *pdev)
+static void bcm_sysport_remove(struct platform_device *pdev)
{
struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -2663,8 +2663,6 @@ static int bcm_sysport_remove(struct platform_device *pdev)
of_phy_deregister_fixed_link(dn);
free_netdev(dev);
dev_set_drvdata(&pdev->dev, NULL);
-
- return 0;
}
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
@@ -2901,7 +2899,7 @@ static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
- .remove = bcm_sysport_remove,
+ .remove_new = bcm_sysport_remove,
.driver = {
.name = "brcm-systemport",
.of_match_table = bcm_sysport_of_match,
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index b4381cd41979..0b21fd5bd457 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -246,13 +246,11 @@ static int bgmac_probe(struct platform_device *pdev)
return bgmac_enet_probe(bgmac);
}
-static int bgmac_remove(struct platform_device *pdev)
+static void bgmac_remove(struct platform_device *pdev)
{
struct bgmac *bgmac = platform_get_drvdata(pdev);
bgmac_enet_remove(bgmac);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -296,7 +294,7 @@ static struct platform_driver bgmac_enet_driver = {
.pm = BGMAC_PM_OPS
},
.probe = bgmac_probe,
- .remove = bgmac_remove,
+ .remove_new = bgmac_remove,
};
module_platform_driver(bgmac_enet_driver);
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 2bc2b707d6ee..ba6c239d52fa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
+bnxt_en-$(CONFIG_BNXT_HWMON) += bnxt_hwmon.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7551aa8068f8..d0359b569afe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -52,8 +52,6 @@
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <net/page_pool/helpers.h>
#include <linux/align.h>
#include <net/netdev_queues.h>
@@ -71,6 +69,7 @@
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"
+#include "bnxt_hwmon.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
@@ -2130,7 +2129,68 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
return INVALID_HW_RING_ID;
}
-static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
+static u16 bnxt_get_force_speed(struct bnxt_link_info *link_info)
+{
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4)
+ return link_info->force_pam4_link_speed;
+ return link_info->force_link_speed;
+}
+
+static void bnxt_set_force_speed(struct bnxt_link_info *link_info)
+{
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ if (link_info->force_pam4_link_speed) {
+ link_info->req_link_speed = link_info->force_pam4_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ }
+}
+
+static void bnxt_set_auto_speed(struct bnxt_link_info *link_info)
+{
+ link_info->advertising = link_info->auto_link_speeds;
+ link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
+}
+
+static bool bnxt_force_speed_updated(struct bnxt_link_info *link_info)
+{
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
+ link_info->req_link_speed != link_info->force_link_speed)
+ return true;
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
+ link_info->req_link_speed != link_info->force_pam4_link_speed)
+ return true;
+ return false;
+}
+
+static bool bnxt_auto_speed_updated(struct bnxt_link_info *link_info)
+{
+ if (link_info->advertising != link_info->auto_link_speeds ||
+ link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
+ return true;
+ return false;
+}
+
+#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
+
+#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
+ (((data2) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >>\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
+ ((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) ==\
+ ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
+
+/* Return true if the workqueue has to be scheduled */
+static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
{
u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
@@ -2145,11 +2205,53 @@ static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
+ u32 type = EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1);
+ char *threshold_type;
+ bool notify = false;
+ char *dir_str;
+
+ switch (type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ threshold_type = "warning";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ threshold_type = "critical";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ threshold_type = "fatal";
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ threshold_type = "shutdown";
+ break;
+ default:
+ netdev_err(bp->dev, "Unknown Thermal threshold type event\n");
+ return false;
+ }
+ if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1)) {
+ dir_str = "above";
+ notify = true;
+ } else {
+ dir_str = "below";
+ }
+ netdev_warn(bp->dev, "Chip temperature has gone %s the %s thermal threshold!\n",
+ dir_str, threshold_type);
+ netdev_warn(bp->dev, "Temperature (In Celsius), Current: %lu, threshold: %lu\n",
+ BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
+ BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
+ if (notify) {
+ bp->thermal_threshold_type = type;
+ set_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event);
+ return true;
+ }
+ return false;
+ }
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
break;
}
+ return false;
}
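The new thermal-threshold branch decodes the current and threshold temperatures from the event's data2 word with the mask/shift macros added above, then decides whether to hand the notification off to the workqueue. A userspace-runnable sketch of the same mask-and-shift decoding, assuming for illustration that each temperature is an 8-bit field (the real layout comes from the ASYNC_EVENT_CMPL_* definitions in the firmware interface header):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only: current temp in bits 7:0,
 * threshold temp in bits 15:8 of data2.
 */
#define CURRENT_TEMP_MASK       0x000000ffu
#define THRESHOLD_TEMP_MASK     0x0000ff00u
#define THRESHOLD_TEMP_SFT      8

#define EVENT_CURRENT_TEMP(data2)       ((data2) & CURRENT_TEMP_MASK)
#define EVENT_THRESHOLD_TEMP(data2) \
        (((data2) & THRESHOLD_TEMP_MASK) >> THRESHOLD_TEMP_SFT)

int main(void)
{
        uint32_t data2 = 0x00006e64;    /* threshold 110 C, current 100 C */

        printf("current %u C, threshold %u C\n",
               (unsigned int)EVENT_CURRENT_TEMP(data2),
               (unsigned int)EVENT_THRESHOLD_TEMP(data2));
        return 0;
}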
#define BNXT_GET_EVENT_PORT(data) \
@@ -2195,7 +2297,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* print unsupported speed warning in forced speed mode only */
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
(data1 & 0x20000)) {
- u16 fw_speed = link_info->force_link_speed;
+ u16 fw_speed = bnxt_get_force_speed(link_info);
u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
if (speed != SPEED_UNKNOWN)
@@ -2350,7 +2452,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
- bnxt_event_error_report(bp, data1, data2);
+ if (bnxt_event_error_report(bp, data1, data2))
+ break;
goto async_event_process_exit;
}
case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
@@ -3199,8 +3302,6 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
pp.dma_dir = bp->rx_dir;
pp.max_len = PAGE_SIZE;
pp.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
- pp.flags |= PP_FLAG_PAGE_FRAG;
rxr->page_pool = page_pool_create(&pp);
if (IS_ERR(rxr->page_pool)) {
@@ -5810,7 +5911,7 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
if (BNXT_PF(bp)) {
struct hwrm_func_cfg_input *req;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -6221,7 +6322,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
struct hwrm_func_cfg_input *req;
u32 enables = 0;
- if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
+ if (bnxt_hwrm_func_cfg_short_req_init(bp, &req))
return NULL;
req->fid = cpu_to_le16(0xffff);
@@ -8566,7 +8667,7 @@ static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
else
return -EINVAL;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -8584,7 +8685,7 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -9628,13 +9729,31 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported)
return ((supported | diff) != supported);
}
+static bool bnxt_support_speed_dropped(struct bnxt_link_info *link_info)
+{
+ /* Check if any advertised speeds are no longer supported. The caller
+ * holds the link_lock mutex, so we can modify link_info settings.
+ */
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds)) {
+ link_info->advertising = link_info->support_auto_speeds;
+ return true;
+ }
+ if (bnxt_support_dropped(link_info->advertising_pam4,
+ link_info->support_pam4_auto_speeds)) {
+ link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
+ return true;
+ }
+ return false;
+}
+
int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_output *resp;
struct hwrm_port_phy_qcfg_input *req;
u8 link_state = link_info->link_state;
- bool support_changed = false;
+ bool support_changed;
int rc;
rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
@@ -9748,19 +9867,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
- /* Check if any advertised speeds are no longer supported. The caller
- * holds the link_lock mutex, so we can modify link_info settings.
- */
- if (bnxt_support_dropped(link_info->advertising,
- link_info->support_auto_speeds)) {
- link_info->advertising = link_info->support_auto_speeds;
- support_changed = true;
- }
- if (bnxt_support_dropped(link_info->advertising_pam4,
- link_info->support_pam4_auto_speeds)) {
- link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
- support_changed = true;
- }
+ support_changed = bnxt_support_speed_dropped(link_info);
if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
bnxt_hwrm_set_link_setting(bp, true, false);
return 0;
@@ -10250,79 +10357,6 @@ static void bnxt_get_wol_settings(struct bnxt *bp)
} while (handle && handle != 0xffff);
}
-#ifdef CONFIG_BNXT_HWMON
-static ssize_t bnxt_show_temp(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct hwrm_temp_monitor_query_output *resp;
- struct hwrm_temp_monitor_query_input *req;
- struct bnxt *bp = dev_get_drvdata(dev);
- u32 len = 0;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
- if (rc)
- return rc;
- resp = hwrm_req_hold(bp, req);
- rc = hwrm_req_send(bp, req);
- if (!rc)
- len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
- hwrm_req_drop(bp, req);
- if (rc)
- return rc;
- return len;
-}
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
-
-static struct attribute *bnxt_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- NULL
-};
-ATTRIBUTE_GROUPS(bnxt);
-
-static void bnxt_hwmon_close(struct bnxt *bp)
-{
- if (bp->hwmon_dev) {
- hwmon_device_unregister(bp->hwmon_dev);
- bp->hwmon_dev = NULL;
- }
-}
-
-static void bnxt_hwmon_open(struct bnxt *bp)
-{
- struct hwrm_temp_monitor_query_input *req;
- struct pci_dev *pdev = bp->pdev;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
- if (!rc)
- rc = hwrm_req_send_silent(bp, req);
- if (rc == -EACCES || rc == -EOPNOTSUPP) {
- bnxt_hwmon_close(bp);
- return;
- }
-
- if (bp->hwmon_dev)
- return;
-
- bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
- DRV_MODULE_NAME, bp,
- bnxt_groups);
- if (IS_ERR(bp->hwmon_dev)) {
- bp->hwmon_dev = NULL;
- dev_warn(&pdev->dev, "Cannot register hwmon device\n");
- }
-}
-#else
-static void bnxt_hwmon_close(struct bnxt *bp)
-{
-}
-
-static void bnxt_hwmon_open(struct bnxt *bp)
-{
-}
-#endif
-
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -10374,19 +10408,14 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
if (BNXT_AUTO_MODE(link_info->auto_mode))
update_link = true;
- if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
- link_info->req_link_speed != link_info->force_link_speed)
- update_link = true;
- else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
- link_info->req_link_speed != link_info->force_pam4_link_speed)
+ if (bnxt_force_speed_updated(link_info))
update_link = true;
if (link_info->req_duplex != link_info->duplex_setting)
update_link = true;
} else {
if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
update_link = true;
- if (link_info->advertising != link_info->auto_link_speeds ||
- link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
+ if (bnxt_auto_speed_updated(link_info))
update_link = true;
}
@@ -10651,7 +10680,6 @@ static int bnxt_open(struct net_device *dev)
bnxt_reenable_sriov(bp);
}
}
- bnxt_hwmon_open(bp);
}
return rc;
@@ -10736,7 +10764,6 @@ static int bnxt_close(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
- bnxt_hwmon_close(bp);
bnxt_close_nic(bp, true, true);
bnxt_hwrm_shutdown_link(bp);
bnxt_hwrm_if_change(bp, false);
@@ -12006,16 +12033,9 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
} else {
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
}
- link_info->advertising = link_info->auto_link_speeds;
- link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
+ bnxt_set_auto_speed(link_info);
} else {
- link_info->req_link_speed = link_info->force_link_speed;
- link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
- if (link_info->force_pam4_link_speed) {
- link_info->req_link_speed =
- link_info->force_pam4_link_speed;
- link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
- }
+ bnxt_set_force_speed(link_info);
link_info->req_duplex = link_info->duplex_setting;
}
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -12109,6 +12129,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
bnxt_fw_echo_reply(bp);
+ if (test_and_clear_bit(BNXT_THERMAL_THRESHOLD_SP_EVENT, &bp->sp_event))
+ bnxt_hwmon_notify_event(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -12237,6 +12260,20 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
+/* FW that pre-reserves 1 VNIC per function */
+static bool bnxt_fw_pre_resv_vnics(struct bnxt *bp)
+{
+ u16 fw_maj = BNXT_FW_MAJ(bp), fw_bld = BNXT_FW_BLD(bp);
+
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (fw_maj > 218 || (fw_maj == 218 && fw_bld >= 18)))
+ return true;
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (fw_maj > 216 || (fw_maj == 216 && fw_bld >= 172)))
+ return true;
+ return false;
+}
+
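bnxt_fw_pre_resv_vnics() gates the new BNXT_FW_CAP_PRE_RESV_VNICS capability on the firmware version, comparing the major and build numbers unpacked from the packed 64-bit fw_ver_code via BNXT_FW_MAJ() and the BNXT_FW_BLD() accessor added in the bnxt.h hunk further down. A small userspace sketch of that packing and threshold test; the version numbers below are arbitrary examples:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing as BNXT_FW_VER_CODE(): maj.min.bld.rsv in one 64-bit word. */
#define FW_VER_CODE(maj, min, bld, rsv) \
        ((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 | \
         (uint64_t)(bld) << 16 | (rsv))
#define FW_MAJ(code)    ((code) >> 48)
#define FW_BLD(code)    (((code) >> 16) & 0xffff)

/* Is the firmware at least maj.bld? (illustrative threshold check) */
static bool fw_at_least(uint64_t code, unsigned int maj, unsigned int bld)
{
        return FW_MAJ(code) > maj ||
               (FW_MAJ(code) == maj && FW_BLD(code) >= bld);
}

int main(void)
{
        uint64_t code = FW_VER_CODE(218, 0, 18, 0);

        printf("218.0.18 meets the 218.x.18 threshold: %s\n",
               fw_at_least(code, 218, 18) ? "yes" : "no");
        return 0;
}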
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -12293,6 +12330,9 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (rc)
return -ENODEV;
+ if (bnxt_fw_pre_resv_vnics(bp))
+ bp->fw_cap |= BNXT_FW_CAP_PRE_RESV_VNICS;
+
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
@@ -12300,6 +12340,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
if (bp->fw_cap & BNXT_FW_CAP_PTP)
__bnxt_hwrm_ptp_qcfg(bp);
bnxt_dcb_init(bp);
+ bnxt_hwmon_init(bp);
return 0;
}
@@ -13205,6 +13246,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
bnxt_ethtool_free(bp);
bnxt_dcb_free(bp);
kfree(bp->ptp_cfg);
@@ -13801,6 +13843,7 @@ init_err_dl:
init_err_pci_clean:
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
+ bnxt_hwmon_uninit(bp);
bnxt_ethtool_free(bp);
bnxt_ptp_clear(bp);
kfree(bp->ptp_cfg);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 84cbcfa61bc1..e702dbc3e6b1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1295,6 +1295,7 @@ struct bnxt_link_info {
u8 req_signal_mode;
#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+#define BNXT_SIG_MODE_MAX (PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST + 1)
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
@@ -2013,6 +2014,9 @@ struct bnxt {
#define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30)
#define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(31)
#define BNXT_FW_CAP_PTP BIT_ULL(32)
+ #define BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED BIT_ULL(33)
+ #define BNXT_FW_CAP_DFLT_VLAN_TPID_PCP BIT_ULL(34)
+ #define BNXT_FW_CAP_PRE_RESV_VNICS BIT_ULL(35)
u32 fw_dbg_cap;
@@ -2053,6 +2057,7 @@ struct bnxt {
#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
#define BNXT_FW_MAJ(bp) ((bp)->fw_ver_code >> 48)
+#define BNXT_FW_BLD(bp) (((bp)->fw_ver_code >> 16) & 0xffff)
u16 vxlan_fw_dst_port_id;
u16 nge_fw_dst_port_id;
@@ -2090,6 +2095,7 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
struct delayed_work fw_reset_task;
@@ -2185,7 +2191,14 @@ struct bnxt {
struct bnxt_tc_info *tc_info;
struct list_head tc_indr_block_list;
struct dentry *debugfs_pdev;
+#ifdef CONFIG_BNXT_HWMON
struct device *hwmon_dev;
+ u8 warn_thresh_temp;
+ u8 crit_thresh_temp;
+ u8 fatal_thresh_temp;
+ u8 shutdown_thresh_temp;
+#endif
+ u32 thermal_threshold_type;
enum board_idx board_idx;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 8b3e7697390f..f302dac56599 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -62,7 +62,7 @@ static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
return -EOPNOTSUPP;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -104,20 +104,21 @@ static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
struct bnxt *bp = devlink_health_reporter_priv(reporter);
struct bnxt_fw_health *h = bp->fw_health;
u32 fw_status, fw_resets;
- int rc;
- if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
+ return 0;
+ }
- if (!h->status_reliable)
- return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
+ if (!h->status_reliable) {
+ devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
+ return 0;
+ }
mutex_lock(&h->lock);
fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (BNXT_FW_IS_BOOTING(fw_status)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
- if (rc)
- goto unlock;
+ devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
} else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
if (!h->severity) {
h->severity = SEVERITY_FATAL;
@@ -126,58 +127,35 @@ static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
devlink_health_report(h->fw_reporter,
"FW error diagnosed", h);
}
- rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error");
- if (rc)
- goto unlock;
- rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
- if (rc)
- goto unlock;
+ devlink_fmsg_string_pair_put(fmsg, "Status", "error");
+ devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
} else {
- rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
- if (rc)
- goto unlock;
+ devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
}
- rc = devlink_fmsg_string_pair_put(fmsg, "Severity",
- bnxt_health_severity_str(h->severity));
- if (rc)
- goto unlock;
+ devlink_fmsg_string_pair_put(fmsg, "Severity",
+ bnxt_health_severity_str(h->severity));
if (h->severity) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Remedy",
- bnxt_health_remedy_str(h->remedy));
- if (rc)
- goto unlock;
- if (h->remedy == REMEDY_DEVLINK_RECOVER) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Impact",
- "traffic+ntuple_cfg");
- if (rc)
- goto unlock;
- }
+ devlink_fmsg_string_pair_put(fmsg, "Remedy",
+ bnxt_health_remedy_str(h->remedy));
+ if (h->remedy == REMEDY_DEVLINK_RECOVER)
+ devlink_fmsg_string_pair_put(fmsg, "Impact",
+ "traffic+ntuple_cfg");
}
-unlock:
mutex_unlock(&h->lock);
- if (rc || !h->resets_reliable)
- return rc;
+ if (!h->resets_reliable)
+ return 0;
fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
- if (rc)
- return rc;
- rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
- if (rc)
- return rc;
- rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
- if (rc)
- return rc;
- rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
- if (rc)
- return rc;
- rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
- if (rc)
- return rc;
- return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
+ devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
+ devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
+ devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
+ devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
+ devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
+ devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
+ return 0;
}
static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
@@ -203,19 +181,12 @@ static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
if (!rc) {
- rc = devlink_fmsg_pair_nest_start(fmsg, "core");
- if (rc)
- goto exit;
- rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
- if (rc)
- goto exit;
- rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
- if (rc)
- goto exit;
- rc = devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, "core");
+ devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
+ devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
+ devlink_fmsg_pair_nest_end(fmsg);
}
-exit:
vfree(data);
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 547247d98eba..f3f384773ac0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -8,6 +8,7 @@
* the Free Software Foundation.
*/
+#include <linux/bitops.h>
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
@@ -534,6 +535,7 @@ static int bnxt_get_num_ring_stats(struct bnxt *bp)
static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = bnxt_get_num_ring_stats(bp);
+ int len;
num_stats += BNXT_NUM_RING_ERR_STATS;
@@ -541,8 +543,12 @@ static int bnxt_get_num_stats(struct bnxt *bp)
num_stats += BNXT_NUM_PORT_STATS;
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- num_stats += bp->fw_rx_stats_ext_size +
- bp->fw_tx_stats_ext_size;
+ len = min_t(int, bp->fw_rx_stats_ext_size,
+ ARRAY_SIZE(bnxt_port_stats_ext_arr));
+ num_stats += len;
+ len = min_t(int, bp->fw_tx_stats_ext_size,
+ ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
+ num_stats += len;
if (bp->pri2cos_valid)
num_stats += BNXT_NUM_STATS_PRI;
}
@@ -652,12 +658,17 @@ skip_ring_stats:
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
u64 *rx_port_stats_ext = bp->rx_port_stats_ext.sw_stats;
u64 *tx_port_stats_ext = bp->tx_port_stats_ext.sw_stats;
+ u32 len;
- for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
+ len = min_t(u32, bp->fw_rx_stats_ext_size,
+ ARRAY_SIZE(bnxt_port_stats_ext_arr));
+ for (i = 0; i < len; i++, j++) {
buf[j] = *(rx_port_stats_ext +
bnxt_port_stats_ext_arr[i].offset);
}
- for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
+ len = min_t(u32, bp->fw_tx_stats_ext_size,
+ ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
+ for (i = 0; i < len; i++, j++) {
buf[j] = *(tx_port_stats_ext +
bnxt_tx_port_stats_ext_arr[i].offset);
}
@@ -756,11 +767,17 @@ skip_tpa_stats:
}
}
if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
- for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
+ u32 len;
+
+ len = min_t(u32, bp->fw_rx_stats_ext_size,
+ ARRAY_SIZE(bnxt_port_stats_ext_arr));
+ for (i = 0; i < len; i++) {
strcpy(buf, bnxt_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
}
- for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
+ len = min_t(u32, bp->fw_tx_stats_ext_size,
+ ARRAY_SIZE(bnxt_tx_port_stats_ext_arr));
+ for (i = 0; i < len; i++) {
strcpy(buf,
bnxt_tx_port_stats_ext_arr[i].string);
buf += ETH_GSTRING_LEN;
@@ -1506,94 +1523,388 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
return speed_mask;
}
-#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
-{ \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 100baseT_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 1000baseT_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 10000baseT_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 25000baseCR_Full); \
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 40000baseCR4_Full);\
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 50000baseCR2_Full);\
- if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 100000baseCR4_Full);\
- if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- Pause); \
- if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
- ethtool_link_ksettings_add_link_mode( \
- lk_ksettings, name, Asym_Pause);\
- } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- Asym_Pause); \
- } \
+enum bnxt_media_type {
+ BNXT_MEDIA_UNKNOWN = 0,
+ BNXT_MEDIA_TP,
+ BNXT_MEDIA_CR,
+ BNXT_MEDIA_SR,
+ BNXT_MEDIA_LR_ER_FR,
+ BNXT_MEDIA_KR,
+ BNXT_MEDIA_KX,
+ BNXT_MEDIA_X,
+ __BNXT_MEDIA_END,
+};
+
+static const enum bnxt_media_type bnxt_phy_types[] = {
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4] = BNXT_MEDIA_KR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2] = BNXT_MEDIA_KR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX] = BNXT_MEDIA_KX,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR] = BNXT_MEDIA_KR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET] = BNXT_MEDIA_TP,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE] = BNXT_MEDIA_TP,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET] = BNXT_MEDIA_TP,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX] = BNXT_MEDIA_X,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX] = BNXT_MEDIA_X,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2] = BNXT_MEDIA_CR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2] = BNXT_MEDIA_SR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2] = BNXT_MEDIA_LR_ER_FR,
+ [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2] = BNXT_MEDIA_LR_ER_FR,
+};
+
+static enum bnxt_media_type
+bnxt_get_media(struct bnxt_link_info *link_info)
+{
+ switch (link_info->media_type) {
+ case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP:
+ return BNXT_MEDIA_TP;
+ case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC:
+ return BNXT_MEDIA_CR;
+ default:
+ if (link_info->phy_type < ARRAY_SIZE(bnxt_phy_types))
+ return bnxt_phy_types[link_info->phy_type];
+ return BNXT_MEDIA_UNKNOWN;
+ }
+}
+
+enum bnxt_link_speed_indices {
+ BNXT_LINK_SPEED_UNKNOWN = 0,
+ BNXT_LINK_SPEED_100MB_IDX,
+ BNXT_LINK_SPEED_1GB_IDX,
+ BNXT_LINK_SPEED_10GB_IDX,
+ BNXT_LINK_SPEED_25GB_IDX,
+ BNXT_LINK_SPEED_40GB_IDX,
+ BNXT_LINK_SPEED_50GB_IDX,
+ BNXT_LINK_SPEED_100GB_IDX,
+ BNXT_LINK_SPEED_200GB_IDX,
+ __BNXT_LINK_SPEED_END
+};
+
+static enum bnxt_link_speed_indices bnxt_fw_speed_idx(u16 speed)
+{
+ switch (speed) {
+ case BNXT_LINK_SPEED_100MB: return BNXT_LINK_SPEED_100MB_IDX;
+ case BNXT_LINK_SPEED_1GB: return BNXT_LINK_SPEED_1GB_IDX;
+ case BNXT_LINK_SPEED_10GB: return BNXT_LINK_SPEED_10GB_IDX;
+ case BNXT_LINK_SPEED_25GB: return BNXT_LINK_SPEED_25GB_IDX;
+ case BNXT_LINK_SPEED_40GB: return BNXT_LINK_SPEED_40GB_IDX;
+ case BNXT_LINK_SPEED_50GB: return BNXT_LINK_SPEED_50GB_IDX;
+ case BNXT_LINK_SPEED_100GB: return BNXT_LINK_SPEED_100GB_IDX;
+ case BNXT_LINK_SPEED_200GB: return BNXT_LINK_SPEED_200GB_IDX;
+ default: return BNXT_LINK_SPEED_UNKNOWN;
+ }
+}
+
+static const enum ethtool_link_mode_bit_indices
+bnxt_link_modes[__BNXT_LINK_SPEED_END][BNXT_SIG_MODE_MAX][__BNXT_MEDIA_END] = {
+ [BNXT_LINK_SPEED_100MB_IDX] = {
+ {
+ [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_1GB_IDX] = {
+ {
+ [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ /* historically baseT, but DAC is more correctly baseX */
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ [BNXT_MEDIA_X] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_10GB_IDX] = {
+ {
+ [BNXT_MEDIA_TP] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ [BNXT_MEDIA_KX] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_25GB_IDX] = {
+ {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_40GB_IDX] = {
+ {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_50GB_IDX] = {
+ [BNXT_SIG_MODE_NRZ] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_100GB_IDX] = {
+ [BNXT_SIG_MODE_NRZ] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ },
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ },
+ },
+ [BNXT_LINK_SPEED_200GB_IDX] = {
+ [BNXT_SIG_MODE_PAM4] = {
+ [BNXT_MEDIA_CR] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ [BNXT_MEDIA_SR] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ [BNXT_MEDIA_LR_ER_FR] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ [BNXT_MEDIA_KR] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ },
+ },
+};
+
+#define BNXT_LINK_MODE_UNKNOWN -1
+
+static enum ethtool_link_mode_bit_indices
+bnxt_get_link_mode(struct bnxt_link_info *link_info)
+{
+ enum ethtool_link_mode_bit_indices link_mode;
+ enum bnxt_link_speed_indices speed;
+ enum bnxt_media_type media;
+ u8 sig_mode;
+
+ if (link_info->phy_link_status != BNXT_LINK_LINK)
+ return BNXT_LINK_MODE_UNKNOWN;
+
+ media = bnxt_get_media(link_info);
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ speed = bnxt_fw_speed_idx(link_info->link_speed);
+ sig_mode = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
+ } else {
+ speed = bnxt_fw_speed_idx(link_info->req_link_speed);
+ sig_mode = link_info->req_signal_mode;
+ }
+ if (sig_mode >= BNXT_SIG_MODE_MAX)
+ return BNXT_LINK_MODE_UNKNOWN;
+
+ /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
+ * link mode, but since no such devices exist, the zeroes in the
+ * map can be conveniently used to represent unknown link modes.
+ */
+ link_mode = bnxt_link_modes[speed][sig_mode][media];
+ if (!link_mode)
+ return BNXT_LINK_MODE_UNKNOWN;
+
+ switch (link_mode) {
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ link_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT;
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ if (~link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ link_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT;
+ break;
+ default:
+ break;
+ }
+
+ return link_mode;
+}
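[Editor's note, not part of the patch: the lookup above resolves an ethtool link mode from three keys — firmware speed index, signaling mode (NRZ or PAM4) and detected media. A minimal sketch of how the table is read, with values taken from the bnxt_link_modes table above:]

	/* 50 Gb/s, PAM4 signaling, direct-attach copper */
	enum ethtool_link_mode_bit_indices mode;

	mode = bnxt_link_modes[BNXT_LINK_SPEED_50GB_IDX]
			      [BNXT_SIG_MODE_PAM4]
			      [BNXT_MEDIA_CR];
	/* mode == ETHTOOL_LINK_MODE_50000baseCR_Full_BIT; a zero entry
	 * means "no mode defined" and is reported as
	 * BNXT_LINK_MODE_UNKNOWN by bnxt_get_link_mode().
	 */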
+
+static void bnxt_get_ethtool_modes(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
+
+ if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ lk_ksettings->link_modes.supported);
+ }
+
+ if (link_info->support_auto_speeds || link_info->support_pam4_auto_speeds)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lk_ksettings->link_modes.supported);
+
+ if (~link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ return;
+
+ if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (hweight8(link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 1)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (link_info->lp_pause & BNXT_LINK_PAUSE_RX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ lk_ksettings->link_modes.lp_advertising);
+ if (hweight8(link_info->lp_pause & BNXT_LINK_PAUSE_BOTH) == 1)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ lk_ksettings->link_modes.lp_advertising);
}
-#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
-{ \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100baseT_Full) || \
- ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100baseT_Half)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 1000baseT_Full) || \
- ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 1000baseT_Half)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 10000baseT_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 25000baseCR_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 40000baseCR4_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 50000baseCR2_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100000baseCR4_Full)) \
- (fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
+static const u16 bnxt_nrz_speed_masks[] = {
+ [BNXT_LINK_SPEED_100MB_IDX] = BNXT_LINK_SPEED_MSK_100MB,
+ [BNXT_LINK_SPEED_1GB_IDX] = BNXT_LINK_SPEED_MSK_1GB,
+ [BNXT_LINK_SPEED_10GB_IDX] = BNXT_LINK_SPEED_MSK_10GB,
+ [BNXT_LINK_SPEED_25GB_IDX] = BNXT_LINK_SPEED_MSK_25GB,
+ [BNXT_LINK_SPEED_40GB_IDX] = BNXT_LINK_SPEED_MSK_40GB,
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_SPEED_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_SPEED_MSK_100GB,
+ [__BNXT_LINK_SPEED_END - 1] = 0 /* make any legal speed a valid index */
+};
+
+static const u16 bnxt_pam4_speed_masks[] = {
+ [BNXT_LINK_SPEED_50GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_50GB,
+ [BNXT_LINK_SPEED_100GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_100GB,
+ [BNXT_LINK_SPEED_200GB_IDX] = BNXT_LINK_PAM4_SPEED_MSK_200GB,
+};
+
+static enum bnxt_link_speed_indices
+bnxt_encoding_speed_idx(u8 sig_mode, u16 speed_msk)
+{
+ const u16 *speeds;
+ int idx, len;
+
+ switch (sig_mode) {
+ case BNXT_SIG_MODE_NRZ:
+ speeds = bnxt_nrz_speed_masks;
+ len = ARRAY_SIZE(bnxt_nrz_speed_masks);
+ break;
+ case BNXT_SIG_MODE_PAM4:
+ speeds = bnxt_pam4_speed_masks;
+ len = ARRAY_SIZE(bnxt_pam4_speed_masks);
+ break;
+ default:
+ return BNXT_LINK_SPEED_UNKNOWN;
+ }
+
+ for (idx = 0; idx < len; idx++) {
+ if (speeds[idx] == speed_msk)
+ return idx;
+ }
+
+ return BNXT_LINK_SPEED_UNKNOWN;
}
-#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
-{ \
- if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 50000baseCR_Full); \
- if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 100000baseCR2_Full);\
- if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \
- ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
- 200000baseCR4_Full);\
+#define BNXT_FW_SPEED_MSK_BITS 16
+
+static void
+__bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
+ u8 sig_mode, unsigned long *et_mask)
+{
+ enum ethtool_link_mode_bit_indices link_mode;
+ enum bnxt_link_speed_indices speed;
+ u8 bit;
+
+ for_each_set_bit(bit, &fw_mask, BNXT_FW_SPEED_MSK_BITS) {
+ speed = bnxt_encoding_speed_idx(sig_mode, 1 << bit);
+ if (!speed)
+ continue;
+
+ link_mode = bnxt_link_modes[speed][sig_mode][media];
+ if (!link_mode)
+ continue;
+
+ linkmode_set_bit(link_mode, et_mask);
+ }
}
-#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
-{ \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 50000baseCR_Full)) \
- (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 100000baseCR2_Full)) \
- (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \
- if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
- 200000baseCR4_Full)) \
- (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \
+static void
+bnxt_get_ethtool_speeds(unsigned long fw_mask, enum bnxt_media_type media,
+ u8 sig_mode, unsigned long *et_mask)
+{
+ if (media) {
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+ return;
+ }
+
+ /* list speeds for all media if unknown */
+ for (media = 1; media < __BNXT_MEDIA_END; media++)
+ __bnxt_get_ethtool_speeds(fw_mask, media, sig_mode, et_mask);
+}
+
+static void bnxt_update_speed(u32 *delta, bool installed_media, u16 *speeds,
+ u16 speed_msk, const unsigned long *et_mask,
+ enum ethtool_link_mode_bit_indices mode)
+{
+ bool mode_desired = linkmode_test_bit(mode, et_mask);
+
+ if (!mode)
+ return;
+
+ /* enabled speeds for installed media should override */
+ if (installed_media && mode_desired) {
+ *speeds |= speed_msk;
+ *delta |= speed_msk;
+ return;
+ }
+
+ /* many to one mapping, only allow one change per fw_speed bit */
+ if (!(*delta & speed_msk) && (mode_desired == !(*speeds & speed_msk))) {
+ *speeds ^= speed_msk;
+ *delta |= speed_msk;
+ }
+}
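[Editor's note, not part of the patch: several ethtool modes — one per media type — map onto a single firmware speed bit, so bnxt_update_speed() lets the installed media win outright and allows at most one toggle per firmware bit for the other media, tracked via the delta mask. A hedged trace for the 100G NRZ row with a DAC (CR) module installed; the media iteration order is illustrative only:]

	/* User advertises only 100000baseSR4; MSK_100GB currently clear.
	 *  - m == BNXT_MEDIA_CR (installed): CR4 not requested, bit stays clear.
	 *  - m == BNXT_MEDIA_SR: SR4 requested, delta clear
	 *      -> advertising ^= BNXT_LINK_SPEED_MSK_100GB (now set),
	 *         delta_nrz |= BNXT_LINK_SPEED_MSK_100GB.
	 *  - remaining media (LR/KR): delta already set, no further change.
	 */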
+
+static void bnxt_set_ethtool_speeds(struct bnxt_link_info *link_info,
+ const unsigned long *et_mask)
+{
+ enum bnxt_media_type media = bnxt_get_media(link_info);
+ u32 delta_pam4 = 0;
+ u32 delta_nrz = 0;
+ int i, m;
+
+ for (i = 1; i < __BNXT_LINK_SPEED_END; i++) {
+ /* accept any legal media from user */
+ for (m = 1; m < __BNXT_MEDIA_END; m++) {
+ bnxt_update_speed(&delta_nrz, m == media,
+ &link_info->advertising,
+ bnxt_nrz_speed_masks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_NRZ][m]);
+ bnxt_update_speed(&delta_pam4, m == media,
+ &link_info->advertising_pam4,
+ bnxt_pam4_speed_masks[i], et_mask,
+ bnxt_link_modes[i][BNXT_SIG_MODE_PAM4][m]);
+ }
+ }
}
static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
@@ -1617,36 +1928,6 @@ static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.advertising);
}
-static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
- struct ethtool_link_ksettings *lk_ksettings)
-{
- u16 fw_speeds = link_info->advertising;
- u8 fw_pause = 0;
-
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
- fw_pause = link_info->auto_pause_setting;
-
- BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
- fw_speeds = link_info->advertising_pam4;
- BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
- bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
-}
-
-static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
- struct ethtool_link_ksettings *lk_ksettings)
-{
- u16 fw_speeds = link_info->lp_auto_link_speeds;
- u8 fw_pause = 0;
-
- if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
- fw_pause = link_info->lp_pause;
-
- BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
- lp_advertising);
- fw_speeds = link_info->lp_auto_pam4_link_speeds;
- BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
-}
-
static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
@@ -1668,30 +1949,6 @@ static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
lk_ksettings->link_modes.supported);
}
-static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
- struct ethtool_link_ksettings *lk_ksettings)
-{
- struct bnxt *bp = container_of(link_info, struct bnxt, link_info);
- u16 fw_speeds = link_info->support_speeds;
-
- BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
- fw_speeds = link_info->support_pam4_speeds;
- BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
-
- if (!(bp->phy_flags & BNXT_PHY_FL_NO_PAUSE)) {
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Pause);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Asym_Pause);
- }
-
- if (link_info->support_auto_speeds ||
- link_info->support_pam4_auto_speeds)
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- Autoneg);
- bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
-}
-
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
switch (fw_link_speed) {
@@ -1720,60 +1977,95 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
}
}
+static void bnxt_get_default_speeds(struct ethtool_link_ksettings *lk_ksettings,
+ struct bnxt_link_info *link_info)
+{
+ struct ethtool_link_settings *base = &lk_ksettings->base;
+
+ if (link_info->link_state == BNXT_LINK_STATE_UP) {
+ base->speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ base->duplex = DUPLEX_HALF;
+ if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ } else if (!link_info->autoneg) {
+ base->speed = bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+ base->duplex = DUPLEX_HALF;
+ if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ }
+}
+
static int bnxt_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *lk_ksettings)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_link_info *link_info = &bp->link_info;
struct ethtool_link_settings *base = &lk_ksettings->base;
- u32 ethtool_speed;
+ enum ethtool_link_mode_bit_indices link_mode;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ enum bnxt_media_type media;
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, lp_advertising);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ base->duplex = DUPLEX_UNKNOWN;
+ base->speed = SPEED_UNKNOWN;
+ link_info = &bp->link_info;
+
mutex_lock(&bp->link_lock);
- bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ bnxt_get_ethtool_modes(link_info, lk_ksettings);
+ media = bnxt_get_media(link_info);
+ bnxt_get_ethtool_speeds(link_info->support_speeds,
+ media, BNXT_SIG_MODE_NRZ,
+ lk_ksettings->link_modes.supported);
+ bnxt_get_ethtool_speeds(link_info->support_pam4_speeds,
+ media, BNXT_SIG_MODE_PAM4,
+ lk_ksettings->link_modes.supported);
+ bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
+ link_mode = bnxt_get_link_mode(link_info);
+ if (link_mode != BNXT_LINK_MODE_UNKNOWN)
+ ethtool_params_from_link_mode(lk_ksettings, link_mode);
+ else
+ bnxt_get_default_speeds(lk_ksettings, link_info);
- ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
if (link_info->autoneg) {
- bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
- ethtool_link_ksettings_add_link_mode(lk_ksettings,
- advertising, Autoneg);
+ bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ lk_ksettings->link_modes.advertising);
base->autoneg = AUTONEG_ENABLE;
- base->duplex = DUPLEX_UNKNOWN;
+ bnxt_get_ethtool_speeds(link_info->advertising,
+ media, BNXT_SIG_MODE_NRZ,
+ lk_ksettings->link_modes.advertising);
+ bnxt_get_ethtool_speeds(link_info->advertising_pam4,
+ media, BNXT_SIG_MODE_PAM4,
+ lk_ksettings->link_modes.advertising);
if (link_info->phy_link_status == BNXT_LINK_LINK) {
- bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
- if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
- else
- base->duplex = DUPLEX_HALF;
+ bnxt_get_ethtool_speeds(link_info->lp_auto_link_speeds,
+ media, BNXT_SIG_MODE_NRZ,
+ lk_ksettings->link_modes.lp_advertising);
+ bnxt_get_ethtool_speeds(link_info->lp_auto_pam4_link_speeds,
+ media, BNXT_SIG_MODE_PAM4,
+ lk_ksettings->link_modes.lp_advertising);
}
- ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
} else {
base->autoneg = AUTONEG_DISABLE;
- ethtool_speed =
- bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
- base->duplex = DUPLEX_HALF;
- if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
- base->duplex = DUPLEX_FULL;
}
- base->speed = ethtool_speed;
base->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
base->port = PORT_TP;
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- TP);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
- TP);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ lk_ksettings->link_modes.advertising);
} else {
- ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
- FIBRE);
- ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
- FIBRE);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ lk_ksettings->link_modes.supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ lk_ksettings->link_modes.advertising);
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
base->port = PORT_DA;
- else if (link_info->media_type ==
- PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
+ else
base->port = PORT_FIBRE;
}
base->phy_address = link_info->phy_addr;
@@ -1782,13 +2074,15 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
+static int
+bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed, u32 lanes)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
u16 support_pam4_spds = link_info->support_pam4_speeds;
u16 support_spds = link_info->support_speeds;
u8 sig_mode = BNXT_SIG_MODE_NRZ;
+ u32 lanes_needed = 1;
u16 fw_speed = 0;
switch (ethtool_speed) {
@@ -1809,37 +2103,46 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
- if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+ if (support_spds & BNXT_LINK_SPEED_MSK_20GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
+ lanes_needed = 2;
+ }
break;
case SPEED_25000:
if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
- if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+ if (support_spds & BNXT_LINK_SPEED_MSK_40GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ lanes_needed = 4;
+ }
break;
case SPEED_50000:
- if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
+ if ((support_spds & BNXT_LINK_SPEED_MSK_50GB) && lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ lanes_needed = 2;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
sig_mode = BNXT_SIG_MODE_PAM4;
}
break;
case SPEED_100000:
- if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
+ if ((support_spds & BNXT_LINK_SPEED_MSK_100GB) &&
+ lanes != 2 && lanes != 1) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ lanes_needed = 4;
} else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 2;
}
break;
case SPEED_200000:
if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
sig_mode = BNXT_SIG_MODE_PAM4;
+ lanes_needed = 4;
}
break;
}
@@ -1849,6 +2152,11 @@ static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
return -EINVAL;
}
+ if (lanes && lanes != lanes_needed) {
+ netdev_err(dev, "unsupported number of lanes for speed\n");
+ return -EINVAL;
+ }
+
if (link_info->req_link_speed == fw_speed &&
link_info->req_signal_mode == sig_mode &&
link_info->autoneg == 0)
@@ -1893,7 +2201,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
bool set_pause = false;
- u32 speed;
+ u32 speed, lanes = 0;
int rc = 0;
if (!BNXT_PHY_CFG_ABLE(bp))
@@ -1901,12 +2209,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
- link_info->advertising = 0;
- link_info->advertising_pam4 = 0;
- BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
- advertising);
- BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
- lk_ksettings, advertising);
+ bnxt_set_ethtool_speeds(link_info,
+ lk_ksettings->link_modes.advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
if (!link_info->advertising && !link_info->advertising_pam4) {
link_info->advertising = link_info->support_auto_speeds;
@@ -1934,7 +2238,8 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
goto set_setting_exit;
}
speed = base->speed;
- rc = bnxt_force_link_speed(dev, speed);
+ lanes = lk_ksettings->lanes;
+ rc = bnxt_force_link_speed(dev, speed, lanes);
if (rc) {
if (rc == -EALREADY)
rc = 0;
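[Editor's note, not part of the patch: with .cap_link_lanes_supported set below, user space may pass a lane count alongside a forced speed, and bnxt_force_link_speed() uses it to choose between NRZ and PAM4 encodings of the same speed. For example (assuming an ethtool binary recent enough to accept the lanes argument, and a hypothetical device name), `ethtool -s eth0 speed 50000 lanes 1 autoneg off` should select the PAM4 50000baseCR path, since the NRZ 50G path needs two lanes; requesting a lane count the chosen encoding cannot provide fails with -EINVAL per the new check above.]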
@@ -4140,6 +4445,7 @@ void bnxt_ethtool_free(struct bnxt *bp)
}
const struct ethtool_ops bnxt_ethtool_ops = {
+ .cap_link_lanes_supported = 1,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 3ae8e8af8ab3..d5fad5a3cdd1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2022 Broadcom Inc.
+ * Copyright (c) 2018-2023 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -191,6 +191,11 @@ struct cmd_nums {
#define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
#define HWRM_QUEUE_GLOBAL_CFG 0x86UL
#define HWRM_QUEUE_GLOBAL_QCFG 0x87UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG 0x88UL
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG 0x89UL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG 0x8aUL
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG 0x8bUL
+ #define HWRM_QUEUE_QCAPS 0x8cUL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -315,6 +320,7 @@ struct cmd_nums {
#define HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR 0x127UL
#define HWRM_CFA_TLS_FILTER_ALLOC 0x128UL
#define HWRM_CFA_TLS_FILTER_FREE 0x129UL
+ #define HWRM_CFA_RELEASE_AFM_FUNC 0x12aUL
#define HWRM_ENGINE_CKV_STATUS 0x12eUL
#define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
#define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
@@ -383,6 +389,9 @@ struct cmd_nums {
#define HWRM_FUNC_DBR_RECOVERY_COMPLETED 0x1aaUL
#define HWRM_FUNC_SYNCE_CFG 0x1abUL
#define HWRM_FUNC_SYNCE_QCFG 0x1acUL
+ #define HWRM_FUNC_KEY_CTX_FREE 0x1adUL
+ #define HWRM_FUNC_LAG_MODE_CFG 0x1aeUL
+ #define HWRM_FUNC_LAG_MODE_QCFG 0x1afUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -408,10 +417,10 @@ struct cmd_nums {
#define HWRM_MFG_SELFTEST_QLIST 0x216UL
#define HWRM_MFG_SELFTEST_EXEC 0x217UL
#define HWRM_STAT_GENERIC_QSTATS 0x218UL
+ #define HWRM_MFG_PRVSN_EXPORT_CERT 0x219UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
- #define HWRM_TF_SESSION_ATTACH 0x2c7UL
#define HWRM_TF_SESSION_REGISTER 0x2c8UL
#define HWRM_TF_SESSION_UNREGISTER 0x2c9UL
#define HWRM_TF_SESSION_CLOSE 0x2caUL
@@ -426,14 +435,6 @@ struct cmd_nums {
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
#define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
- #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL
- #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL
- #define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
- #define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
- #define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
- #define HWRM_TF_EXT_EM_OP 0x2e7UL
- #define HWRM_TF_EXT_EM_CFG 0x2e8UL
- #define HWRM_TF_EXT_EM_QCFG 0x2e9UL
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
#define HWRM_TF_EM_HASH_INSERT 0x2ecUL
@@ -465,6 +466,14 @@ struct cmd_nums {
#define HWRM_TFC_IDX_TBL_GET 0x390UL
#define HWRM_TFC_IDX_TBL_FREE 0x391UL
#define HWRM_TFC_GLOBAL_ID_ALLOC 0x392UL
+ #define HWRM_TFC_TCAM_SET 0x393UL
+ #define HWRM_TFC_TCAM_GET 0x394UL
+ #define HWRM_TFC_TCAM_ALLOC 0x395UL
+ #define HWRM_TFC_TCAM_ALLOC_SET 0x396UL
+ #define HWRM_TFC_TCAM_FREE 0x397UL
+ #define HWRM_TFC_IF_TBL_SET 0x398UL
+ #define HWRM_TFC_IF_TBL_GET 0x399UL
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -494,6 +503,8 @@ struct cmd_nums {
#define HWRM_DBG_USEQ_RUN 0xff29UL
#define HWRM_DBG_USEQ_DELIVERY_REQ 0xff2aUL
#define HWRM_DBG_USEQ_RESP_HDR 0xff2bUL
+ #define HWRM_NVM_GET_VPD_FIELD_INFO 0xffeaUL
+ #define HWRM_NVM_SET_VPD_FIELD_INFO 0xffebUL
#define HWRM_NVM_DEFRAG 0xffecUL
#define HWRM_NVM_REQ_ARBITRATION 0xffedUL
#define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
@@ -540,6 +551,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_BUSY 0x10UL
#define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_PF_UNAVAILABLE 0x12UL
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT 0x13UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -571,8 +583,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 118
-#define HWRM_VERSION_STR "1.10.2.118"
+#define HWRM_VERSION_RSVD 171
+#define HWRM_VERSION_STR "1.10.2.171"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -761,51 +773,53 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
#define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
__le16 event_id;
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x49UL
- #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TCP_FLAG_ACTION_CHANGE 0x3aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_FLOW_ACTIVE 0x3bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CFG_CHANGE 0x3cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_DEFAULT_VNIC_CHANGE 0x3dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_TFLIB_LINK_STATUS_CHANGE 0x3eUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE 0x43UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP 0x44UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD 0x46UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE 0x47UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE 0x48UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4bUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
__le32 event_data2;
u8 opaque_v;
#define ASYNC_EVENT_CMPL_V 0x1UL
@@ -1011,6 +1025,7 @@ struct hwrm_async_event_cmpl_vf_cfg_change {
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
#define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE 0x20UL
};
/* hwrm_async_event_cmpl_default_vnic_change (size:128b/16B) */
@@ -1402,6 +1417,45 @@ struct hwrm_async_event_cmpl_error_report_doorbell_drop_threshold {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT 8
};
+/* hwrm_async_event_cmpl_error_report_thermal (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_thermal {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK 0xff00UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT 8
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_ERROR_TYPE_THERMAL_EVENT
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK 0x700UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SFT 8
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN (0x0UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL (0x1UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL (0x2UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN (0x3UL << 8)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR 0x800UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_DECREASING (0x0UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING (0x1UL << 11)
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1502,7 +1556,7 @@ struct hwrm_func_vf_free_output {
u8 valid;
};
-/* hwrm_func_vf_cfg_input (size:448b/56B) */
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
struct hwrm_func_vf_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1510,20 +1564,22 @@ struct hwrm_func_vf_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_KEY_CTXS 0x1000UL
- #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_TX_KEY_CTXS 0x1000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_KTLS_RX_KEY_CTXS 0x2000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_TX_KEY_CTXS 0x4000UL
+ #define FUNC_VF_CFG_REQ_ENABLES_NUM_QUIC_RX_KEY_CTXS 0x8000UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
@@ -1547,8 +1603,12 @@ struct hwrm_func_vf_cfg_input {
__le16 num_vnics;
__le16 num_stat_ctxs;
__le16 num_hw_ring_grps;
- __le16 num_tx_key_ctxs;
- __le16 num_rx_key_ctxs;
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le16 num_msix;
+ u8 unused[2];
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
};
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -1572,7 +1632,7 @@ struct hwrm_func_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_qcaps_output (size:768b/96B) */
+/* hwrm_func_qcaps_output (size:896b/112B) */
struct hwrm_func_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -1686,6 +1746,11 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_SYNCE_SUPPORTED 0x80UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED 0x100UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_HW_LAG_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_STEERING_TAG_SUPPORTED 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED 0x4000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1695,7 +1760,15 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_IPINIP 0x20UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
+ u8 key_xid_partition_cap;
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_TKC 0x1UL
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_RKC 0x2UL
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_TKC 0x4UL
+ #define FUNC_QCAPS_RESP_KEY_XID_PARTITION_CAP_QUIC_RKC 0x8UL
u8 unused_1;
+ u8 device_serial_number[8];
+ __le16 ctxs_per_partition;
+ u8 unused_2[5];
u8 valid;
};
@@ -1710,7 +1783,7 @@ struct hwrm_func_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_func_qcfg_output (size:896b/112B) */
+/* hwrm_func_qcfg_output (size:1024b/128B) */
struct hwrm_func_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1870,19 +1943,24 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
#define FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__le16 host_mtu;
- __le16 alloc_tx_key_ctxs;
- __le16 alloc_rx_key_ctxs;
+ u8 unused_3[2];
+ u8 unused_4[2];
u8 port_kdnet_mode;
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_DISABLED 0x0UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED 0x1UL
#define FUNC_QCFG_RESP_PORT_KDNET_MODE_LAST FUNC_QCFG_RESP_PORT_KDNET_MODE_ENABLED
u8 kdnet_pcie_function;
__le16 port_kdnet_fid;
- u8 unused_3;
+ u8 unused_5[2];
+ __le32 alloc_tx_key_ctxs;
+ __le32 alloc_rx_key_ctxs;
+ u8 lag_id;
+ u8 parif;
+ u8 unused_6[5];
u8 valid;
};
-/* hwrm_func_cfg_input (size:960b/120B) */
+/* hwrm_func_cfg_input (size:1088b/136B) */
struct hwrm_func_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2061,8 +2139,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_PARTITION_MAX_BW_BW_VALUE_UNIT_PERCENT1_100
__be16 tpid;
__le16 host_mtu;
- __le16 num_tx_key_ctxs;
- __le16 num_rx_key_ctxs;
+ u8 unused_0[4];
__le32 enables2;
#define FUNC_CFG_REQ_ENABLES2_KDNET 0x1UL
#define FUNC_CFG_REQ_ENABLES2_DB_PAGE_SIZE 0x2UL
@@ -2083,7 +2160,12 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_CFG_REQ_DB_PAGE_SIZE_LAST FUNC_CFG_REQ_DB_PAGE_SIZE_4MB
- u8 unused_0[6];
+ u8 unused_1[2];
+ __le32 num_ktls_tx_key_ctxs;
+ __le32 num_ktls_rx_key_ctxs;
+ __le32 num_quic_tx_key_ctxs;
+ __le32 num_quic_rx_key_ctxs;
+ __le32 unused_2;
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -2390,7 +2472,11 @@ struct hwrm_func_drv_qver_input {
__le64 resp_addr;
__le32 reserved;
__le16 fid;
- u8 unused_0[2];
+ u8 driver_type;
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_L2 0x0UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE 0x1UL
+ #define FUNC_DRV_QVER_REQ_DRIVER_TYPE_LAST FUNC_DRV_QVER_REQ_DRIVER_TYPE_ROCE
+ u8 unused_0;
};
/* hwrm_func_drv_qver_output (size:256b/32B) */
@@ -2435,7 +2521,7 @@ struct hwrm_func_resource_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_func_resource_qcaps_output (size:512b/64B) */
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
struct hwrm_func_resource_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2467,15 +2553,20 @@ struct hwrm_func_resource_qcaps_output {
__le16 max_tx_scheduler_inputs;
__le16 flags;
#define FUNC_RESOURCE_QCAPS_RESP_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_tx_key_ctxs;
- __le16 max_tx_key_ctxs;
- __le16 min_rx_key_ctxs;
- __le16 max_rx_key_ctxs;
- u8 unused_0[5];
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+ u8 unused_0[3];
u8 valid;
};
-/* hwrm_func_vf_resource_cfg_input (size:512b/64B) */
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
struct hwrm_func_vf_resource_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2502,14 +2593,18 @@ struct hwrm_func_vf_resource_cfg_input {
__le16 max_hw_ring_grps;
__le16 flags;
#define FUNC_VF_RESOURCE_CFG_REQ_FLAGS_MIN_GUARANTEED 0x1UL
- __le16 min_tx_key_ctxs;
- __le16 max_tx_key_ctxs;
- __le16 min_rx_key_ctxs;
- __le16 max_rx_key_ctxs;
- u8 unused_0[2];
-};
-
-/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
+ __le16 min_msix;
+ __le32 min_ktls_tx_key_ctxs;
+ __le32 max_ktls_tx_key_ctxs;
+ __le32 min_ktls_rx_key_ctxs;
+ __le32 max_ktls_rx_key_ctxs;
+ __le32 min_quic_tx_key_ctxs;
+ __le32 max_quic_tx_key_ctxs;
+ __le32 min_quic_rx_key_ctxs;
+ __le32 max_quic_rx_key_ctxs;
+};
+
+/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
struct hwrm_func_vf_resource_cfg_output {
__le16 error_code;
__le16 req_type;
@@ -2523,9 +2618,9 @@ struct hwrm_func_vf_resource_cfg_output {
__le16 reserved_vnics;
__le16 reserved_stat_ctx;
__le16 reserved_hw_ring_grps;
- __le16 reserved_tx_key_ctxs;
- __le16 reserved_rx_key_ctxs;
- u8 unused_0[3];
+ __le32 reserved_tx_key_ctxs;
+ __le32 reserved_rx_key_ctxs;
+ u8 unused_0[7];
u8 valid;
};
@@ -2592,7 +2687,8 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 rkc_entry_size;
__le32 tkc_max_entries;
__le32 rkc_max_entries;
- u8 rsvd1[7];
+ __le16 fast_qpmd_qp_num_entries;
+ u8 rsvd1[5];
u8 valid;
};
@@ -2630,27 +2726,28 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TKC 0x80000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_RKC 0x100000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP_FAST_QPMD 0x200000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -3047,7 +3144,7 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_8M (0x4UL << 4)
#define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G (0x5UL << 4)
#define FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RKC_PG_SIZE_PG_1G
- u8 rsvd[2];
+ __le16 qp_num_fast_qpmd_entries;
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -3477,6 +3574,8 @@ struct hwrm_func_backing_store_cfg_v2_input {
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3546,6 +3645,8 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3559,22 +3660,24 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3609,7 +3712,8 @@ struct hwrm_func_backing_store_qcfg_v2_output {
struct qpc_split_entries {
__le32 qp_num_l2_entries;
__le32 qp_num_qp1_entries;
- __le32 rsvd[2];
+ __le32 qp_num_fast_qpmd_entries;
+ __le32 rsvd;
};
/* srq_split_entries (size:128b/16B) */
@@ -3666,6 +3770,8 @@ struct hwrm_func_backing_store_qcaps_v2_input {
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
@@ -3696,13 +3802,16 @@ struct hwrm_func_backing_store_qcaps_v2_output {
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_TYPE_VALID 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_DRIVER_MANAGED_MEMORY 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC 0x8UL
__le32 instance_bit_map;
u8 ctx_init_value;
u8 ctx_init_offset;
@@ -3712,7 +3821,13 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le32 min_num_entries;
__le16 next_valid_type;
u8 subtype_valid_cnt;
- u8 rsvd2;
+ u8 exact_cnt_bit_map;
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
__le32 split_entry_0;
__le32 split_entry_1;
__le32 split_entry_2;
@@ -4599,7 +4714,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:3776b/472B) */
+/* rx_port_stats_ext (size:3904b/488B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -4660,6 +4775,8 @@ struct rx_port_stats_ext {
__le64 rx_discard_packets_cos7;
__le64 rx_fec_corrected_blocks;
__le64 rx_fec_uncorrectable_blocks;
+ __le64 rx_filter_miss;
+ __le64 rx_fec_symbol_err;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -6092,6 +6209,7 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
#define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
#define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+ #define VNIC_CFG_REQ_FLAGS_PORTCOS_MAPPING_MODE 0x80UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -6181,12 +6299,16 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_AH_SPI_IPV6_CAP 0x800000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_IPSEC_ESP_SPI_IPV6_CAP 0x1000000UL
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_TRUSTED_VF_CAP 0x2000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_PORTCOS_MAPPING_MODE 0x4000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_PROF_TCAM_MODE_ENABLED 0x8000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_RSS_HASH_MODE_CAP 0x10000000UL
+ #define VNIC_QCAPS_RESP_FLAGS_HW_TUNNEL_TPA_CAP 0x20000000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
};
-/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
struct hwrm_vnic_tpa_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6208,6 +6330,7 @@ struct hwrm_vnic_tpa_cfg_input {
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
#define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
#define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+ #define VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN 0x10UL
__le16 vnic_id;
__le16 max_agg_segs;
#define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
@@ -6227,6 +6350,25 @@ struct hwrm_vnic_tpa_cfg_input {
u8 unused_0[2];
__le32 max_agg_timer;
__le32 min_agg_len;
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_1[4];
};
/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
@@ -6282,7 +6424,25 @@ struct hwrm_vnic_tpa_qcfg_output {
#define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
__le32 max_agg_timer;
__le32 min_agg_len;
- u8 unused_0[7];
+ __le32 tnl_tpa_en_bitmap;
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN 0x1UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GENEVE 0x2UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_NVGRE 0x4UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE 0x8UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV4 0x10UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_IPV6 0x20UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_GPE 0x40UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_VXLAN_CUST1 0x80UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_GRE_CUST1 0x100UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR1 0x200UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR2 0x400UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR3 0x800UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR4 0x1000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR5 0x2000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR6 0x4000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR7 0x8000UL
+ #define VNIC_TPA_QCFG_RESP_TNL_TPA_EN_BITMAP_UPAR8 0x10000UL
+ u8 unused_0[3];
u8 valid;
};
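The TNL_TPA_EN enable bit and bitmap added above let a driver opt individual tunnel encapsulations into hardware TPA per VNIC. A minimal sketch of a caller, assuming the existing hwrm_req_* helpers; the function name and the choice of VXLAN/Geneve bits are illustrative only, not part of this patch:

static int bnxt_hwrm_vnic_set_tnl_tpa_sketch(struct bnxt *bp, u16 vnic_id)
{
	struct hwrm_vnic_tpa_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
	if (rc)
		return rc;
	req->vnic_id = cpu_to_le16(vnic_id);
	req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_TNL_TPA_EN);
	/* aggregate only VXLAN and Geneve encapsulated flows */
	req->tnl_tpa_en_bitmap =
		cpu_to_le32(VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_VXLAN |
			    VNIC_TPA_CFG_REQ_TNL_TPA_EN_BITMAP_GENEVE);
	return hwrm_req_send(bp, req);
}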
@@ -6317,8 +6477,9 @@ struct hwrm_vnic_rss_cfg_input {
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
u8 flags;
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
- #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT 0x4UL
u8 ring_select_mode;
#define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_TOEPLITZ 0x0UL
#define VNIC_RSS_CFG_REQ_RING_SELECT_MODE_XOR 0x1UL
@@ -6480,14 +6641,15 @@ struct hwrm_ring_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 enables;
- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
- #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
- #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
- #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
- #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
- #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+ #define RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID 0x40UL
+ #define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
+ #define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
+ #define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
+ #define RING_ALLOC_REQ_ENABLES_STEERING_TAG_VALID 0x800UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -6541,7 +6703,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
#define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- __le16 unused_3;
+ __le16 steering_tag;
__le32 reserved3;
__le32 stat_ctx_id;
__le32 reserved4;
@@ -6917,6 +7079,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_4;
@@ -7099,6 +7262,7 @@ struct hwrm_cfa_tunnel_filter_alloc_input {
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 tunnel_flags;
@@ -7233,7 +7397,8 @@ struct hwrm_cfa_encap_record_alloc_input {
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE_V1 0xaUL
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2_ETYPE 0xbUL
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6 0xcUL
- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE_V6
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE 0x10UL
+ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN_GPE
u8 unused_0[3];
__le32 encap_data[20];
};
@@ -7338,6 +7503,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 pri_hint;
@@ -7485,6 +7651,7 @@ struct hwrm_cfa_decap_filter_alloc_input {
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
u8 unused_0;
@@ -7628,6 +7795,7 @@ struct hwrm_cfa_flow_alloc_input {
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
#define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
};
@@ -8053,8 +8221,11 @@ struct hwrm_tunnel_dst_port_query_input {
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
#define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI
- u8 unused_0[7];
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE
+ u8 tunnel_next_proto;
+ u8 unused_0[6];
};
/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
@@ -8094,10 +8265,12 @@ struct hwrm_tunnel_dst_port_alloc_input {
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
#define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI
- u8 unused_0;
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE
+ u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
- u8 unused_1[4];
+ u8 unused_0[4];
};
/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
@@ -8141,10 +8314,12 @@ struct hwrm_tunnel_dst_port_free_input {
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI
- u8 unused_0;
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE
+ u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
- u8 unused_1[4];
+ u8 unused_0[4];
};
/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
@@ -8212,7 +8387,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_events;
};
-/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+/* hwrm_stat_ctx_alloc_input (size:320b/40B) */
struct hwrm_stat_ctx_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8225,6 +8400,10 @@ struct hwrm_stat_ctx_alloc_input {
#define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
u8 unused_0;
__le16 stats_dma_length;
+ __le16 flags;
+ #define STAT_CTX_ALLOC_REQ_FLAGS_STEERING_TAG_VALID 0x1UL
+ __le16 steering_tag;
+ __le32 unused_1;
};
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -8432,7 +8611,7 @@ struct hwrm_stat_generic_qstats_output {
u8 valid;
};
-/* generic_sw_hw_stats (size:1216b/152B) */
+/* generic_sw_hw_stats (size:1408b/176B) */
struct generic_sw_hw_stats {
__le64 pcie_statistics_tx_tlp;
__le64 pcie_statistics_rx_tlp;
@@ -8453,6 +8632,9 @@ struct generic_sw_hw_stats {
__le64 cache_miss_count_cfcs;
__le64 cache_miss_count_cfcc;
__le64 cache_miss_count_cfcm;
+ __le64 hw_db_recov_dbs_dropped;
+ __le64 hw_db_recov_drops_serviced;
+ __le64 hw_db_recov_dbs_recovered;
};
/* hwrm_fw_reset_input (size:192b/24B) */
@@ -8876,7 +9058,7 @@ struct hwrm_temp_monitor_query_input {
__le64 resp_addr;
};
-/* hwrm_temp_monitor_query_output (size:128b/16B) */
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
struct hwrm_temp_monitor_query_output {
__le16 error_code;
__le16 req_type;
@@ -8886,14 +9068,20 @@ struct hwrm_temp_monitor_query_output {
u8 phy_temp;
u8 om_temp;
u8 flags;
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
- #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_TEMP_NOT_AVAILABLE 0x1UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_PHY_TEMP_NOT_AVAILABLE 0x2UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_NOT_PRESENT 0x4UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_OM_TEMP_NOT_AVAILABLE 0x8UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_EXT_TEMP_FIELDS_AVAILABLE 0x10UL
+ #define TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE 0x20UL
u8 temp2;
u8 phy_temp2;
u8 om_temp2;
+ u8 warn_threshold;
+ u8 critical_threshold;
+ u8 fatal_threshold;
+ u8 shutdown_threshold;
+ u8 unused_0[4];
u8 valid;
};
@@ -9317,7 +9505,8 @@ struct hwrm_dbg_ring_info_get_output {
__le32 producer_index;
__le32 consumer_index;
__le32 cag_vector_ctrl;
- u8 unused_0[3];
+ __le16 st_tag;
+ u8 unused_0;
u8 valid;
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
new file mode 100644
index 000000000000..669d24ba0e87
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.c
@@ -0,0 +1,241 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/pci.h>
+
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_hwmon.h"
+
+void bnxt_hwmon_notify_event(struct bnxt *bp)
+{
+ u32 attr;
+
+ if (!bp->hwmon_dev)
+ return;
+
+ switch (bp->thermal_threshold_type) {
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ attr = hwmon_temp_max_alarm;
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ attr = hwmon_temp_crit_alarm;
+ break;
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ attr = hwmon_temp_emergency_alarm;
+ break;
+ default:
+ return;
+ }
+
+ hwmon_notify_event(&bp->pdev->dev, hwmon_temp, attr, 0);
+}
+
+static int bnxt_hwrm_temp_query(struct bnxt *bp, u8 *temp)
+{
+ struct hwrm_temp_monitor_query_output *resp;
+ struct hwrm_temp_monitor_query_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
+ if (rc)
+ return rc;
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send_silent(bp, req);
+ if (rc)
+ goto drop_req;
+
+ if (temp) {
+ *temp = resp->temp;
+ } else if (resp->flags &
+ TEMP_MONITOR_QUERY_RESP_FLAGS_THRESHOLD_VALUES_AVAILABLE) {
+ bp->fw_cap |= BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED;
+ bp->warn_thresh_temp = resp->warn_threshold;
+ bp->crit_thresh_temp = resp->critical_threshold;
+ bp->fatal_thresh_temp = resp->fatal_threshold;
+ bp->shutdown_thresh_temp = resp->shutdown_threshold;
+ }
+drop_req:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+static umode_t bnxt_hwmon_is_visible(const void *_data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct bnxt *bp = _data;
+
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_max:
+ case hwmon_temp_crit:
+ case hwmon_temp_emergency:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_emergency_alarm:
+ if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED))
+ return 0;
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static int bnxt_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u8 temp = 0;
+ int rc;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp * 1000;
+ return rc;
+ case hwmon_temp_max:
+ *val = bp->warn_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_crit:
+ *val = bp->crit_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_emergency:
+ *val = bp->fatal_thresh_temp * 1000;
+ return 0;
+ case hwmon_temp_max_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->warn_thresh_temp;
+ return rc;
+ case hwmon_temp_crit_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->crit_thresh_temp;
+ return rc;
+ case hwmon_temp_emergency_alarm:
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (!rc)
+ *val = temp >= bp->fatal_thresh_temp;
+ return rc;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *bnxt_hwmon_info[] = {
+ HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_EMERGENCY | HWMON_T_MAX_ALARM |
+ HWMON_T_CRIT_ALARM | HWMON_T_EMERGENCY_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops bnxt_hwmon_ops = {
+ .is_visible = bnxt_hwmon_is_visible,
+ .read = bnxt_hwmon_read,
+};
+
+static const struct hwmon_chip_info bnxt_hwmon_chip_info = {
+ .ops = &bnxt_hwmon_ops,
+ .info = bnxt_hwmon_info,
+};
+
+static ssize_t temp1_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%u\n", bp->shutdown_thresh_temp * 1000);
+}
+
+static ssize_t temp1_shutdown_alarm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt *bp = dev_get_drvdata(dev);
+ u8 temp;
+ int rc;
+
+ rc = bnxt_hwrm_temp_query(bp, &temp);
+ if (rc)
+ return -EIO;
+
+ return sysfs_emit(buf, "%u\n", temp >= bp->shutdown_thresh_temp);
+}
+
+static DEVICE_ATTR_RO(temp1_shutdown);
+static DEVICE_ATTR_RO(temp1_shutdown_alarm);
+
+static struct attribute *bnxt_temp_extra_attrs[] = {
+ &dev_attr_temp1_shutdown.attr,
+ &dev_attr_temp1_shutdown_alarm.attr,
+ NULL,
+};
+
+static umode_t bnxt_temp_extra_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct bnxt *bp = dev_get_drvdata(dev);
+
+ /* Shutdown temperature setting in NVM is optional */
+ if (!(bp->fw_cap & BNXT_FW_CAP_THRESHOLD_TEMP_SUPPORTED) ||
+ !bp->shutdown_thresh_temp)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group bnxt_temp_extra_group = {
+ .attrs = bnxt_temp_extra_attrs,
+ .is_visible = bnxt_temp_extra_attrs_visible,
+};
+__ATTRIBUTE_GROUPS(bnxt_temp_extra);
+
+void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+ if (bp->hwmon_dev) {
+ hwmon_device_unregister(bp->hwmon_dev);
+ bp->hwmon_dev = NULL;
+ }
+}
+
+void bnxt_hwmon_init(struct bnxt *bp)
+{
+ struct pci_dev *pdev = bp->pdev;
+ int rc;
+
+	/* temp1_xxx is the only sensor; don't register if the query will fail */
+ rc = bnxt_hwrm_temp_query(bp, NULL);
+ if (rc == -EACCES || rc == -EOPNOTSUPP) {
+ bnxt_hwmon_uninit(bp);
+ return;
+ }
+
+ if (bp->hwmon_dev)
+ return;
+
+ bp->hwmon_dev = hwmon_device_register_with_info(&pdev->dev,
+ DRV_MODULE_NAME, bp,
+ &bnxt_hwmon_chip_info,
+ bnxt_temp_extra_groups);
+ if (IS_ERR(bp->hwmon_dev)) {
+ bp->hwmon_dev = NULL;
+ dev_warn(&pdev->dev, "Cannot register hwmon device\n");
+ }
+}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
new file mode 100644
index 000000000000..de54a562e06a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwmon.h
@@ -0,0 +1,30 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2023 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNXT_HWMON_H
+#define BNXT_HWMON_H
+
+#ifdef CONFIG_BNXT_HWMON
+void bnxt_hwmon_notify_event(struct bnxt *bp);
+void bnxt_hwmon_uninit(struct bnxt *bp);
+void bnxt_hwmon_init(struct bnxt *bp);
+#else
+static inline void bnxt_hwmon_notify_event(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_uninit(struct bnxt *bp)
+{
+}
+
+static inline void bnxt_hwmon_init(struct bnxt *bp)
+{
+}
+#endif
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 132442f16fe6..1df3d56cc4b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -485,6 +485,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
if (msg_len > BNXT_HWRM_MAX_REQ_LEN &&
msg_len > bp->hwrm_max_ext_req_len) {
+		netdev_warn(bp->dev, "oversized hwrm request, req_type 0x%x\n",
+			    req_type);
rc = -E2BIG;
goto exit;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
index c98032e38188..15ca51b5d204 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h
@@ -137,4 +137,18 @@ int hwrm_req_send_silent(struct bnxt *bp, void *req);
int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len);
void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags);
void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma);
+
+/* Older devices can only support req length of 128.
+ * HWRM_FUNC_CFG requests which don't need fields starting at
+ * num_quic_tx_key_ctxs can use this helper to avoid getting -E2BIG.
+ */
+static inline int
+bnxt_hwrm_func_cfg_short_req_init(struct bnxt *bp,
+ struct hwrm_func_cfg_input **req)
+{
+ u32 req_len;
+
+ req_len = min_t(u32, sizeof(**req), bp->hwrm_max_ext_req_len);
+ return __hwrm_req_init(bp, (void **)req, HWRM_FUNC_CFG, req_len);
+}
#endif
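The helper above is used by the HWRM_FUNC_CFG call sites converted in bnxt_sriov.c below; they all follow the same shape. A minimal sketch (the wrapper name and flag value are illustrative; the helper and request fields are the ones from this patch):

static int bnxt_set_vf_flags_sketch(struct bnxt *bp, u16 fw_fid, u32 func_flags)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	/* request capped at hwrm_max_ext_req_len, so older devices do not
	 * trip the -E2BIG check in __hwrm_send()
	 */
	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;
	req->fid = cpu_to_le16(fw_fid);
	req->flags = cpu_to_le32(func_flags);
	return hwrm_req_send(bp, req);
}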
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index dde327f2c57e..c722b3b41730 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -95,7 +95,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
/*TODO: if the driver supports VLAN filter on guest VLAN,
* the spoof check should also include vlan anti-spoofing
*/
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->flags = cpu_to_le32(func_flags);
@@ -146,7 +146,7 @@ static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -232,7 +232,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
}
vf = &bp->pf.vf[vf_id];
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -274,7 +274,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
if (vlan_tag == vf->vlan)
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->dflt_vlan = cpu_to_le16(vlan_tag);
@@ -314,7 +314,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
}
if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
return 0;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (!rc) {
req->fid = cpu_to_le16(vf->fw_fid);
req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
@@ -491,7 +491,7 @@ static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
struct bnxt_vf_info *vf;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
@@ -550,7 +550,6 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
- vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
@@ -572,11 +571,20 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
vf_cp_rings /= num_vfs;
vf_tx_rings /= num_vfs;
vf_rx_rings /= num_vfs;
- vf_vnics /= num_vfs;
+ if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
+ vf_vnics >= pf->max_vfs) {
+ /* Take into account that FW has pre-reserved 1 VNIC for
+ * each pf->max_vfs.
+ */
+ vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
+ } else {
+ vf_vnics /= num_vfs;
+ }
vf_stat_ctx /= num_vfs;
vf_ring_grps /= num_vfs;
vf_rss /= num_vfs;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
req->min_tx_rings = cpu_to_le16(vf_tx_rings);
req->min_rx_rings = cpu_to_le16(vf_rx_rings);
@@ -645,7 +653,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u32 mtu, i;
int rc;
- rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
if (rc)
return rc;
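A quick worked example of the reworked VNIC split above, using illustrative numbers that are not from the patch:

/* Assume 128 spare VNICs, pf->max_vfs = 64, num_vfs = 16, and
 * BNXT_FW_CAP_PRE_RESV_VNICS set.  Firmware has already set aside one
 * VNIC per possible VF, so 64 of the 128 are spoken for; each enabled
 * VF keeps its pre-reserved VNIC and receives an even share of the
 * rest: (128 - 64 + 16) / 16 = 5, instead of the old 128 / 16 = 8.
 * The result is still clamped to vf_rx_rings afterwards.
 */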
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 24bade875ca6..9282403d1bf6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3247,23 +3247,6 @@ static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bcmgenet_poll_controller(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
-
- /* Invoke the main RX/TX interrupt handler */
- disable_irq(priv->irq0);
- bcmgenet_isr0(priv->irq0, priv);
- enable_irq(priv->irq0);
-
- /* And the interrupt handler for RX/TX priority queues */
- disable_irq(priv->irq1);
- bcmgenet_isr1(priv->irq1, priv);
- enable_irq(priv->irq1);
-}
-#endif
-
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -3720,9 +3703,6 @@ static const struct net_device_ops bcmgenet_netdev_ops = {
.ndo_set_mac_address = bcmgenet_set_mac_addr,
.ndo_eth_ioctl = phy_do_ioctl_running,
.ndo_set_features = bcmgenet_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bcmgenet_poll_controller,
-#endif
.ndo_get_stats = bcmgenet_get_stats,
.ndo_change_carrier = bcmgenet_change_carrier,
};
@@ -4164,7 +4144,7 @@ err:
return err;
}
-static int bcmgenet_remove(struct platform_device *pdev)
+static void bcmgenet_remove(struct platform_device *pdev)
{
struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
@@ -4172,8 +4152,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
unregister_netdev(priv->dev);
bcmgenet_mii_exit(priv->dev);
free_netdev(priv->dev);
-
- return 0;
}
static void bcmgenet_shutdown(struct platform_device *pdev)
@@ -4352,7 +4330,7 @@ MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
static struct platform_driver bcmgenet_driver = {
.probe = bcmgenet_probe,
- .remove = bcmgenet_remove,
+ .remove_new = bcmgenet_remove,
.shutdown = bcmgenet_shutdown,
.driver = {
.name = "bcmgenet",
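This is the first of several platform drivers in the hunks that follow converted to .remove_new; the callback takes the same argument as .remove but returns void, since the driver core ignored the int return value anyway. A generic sketch with illustrative names:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;	/* set up the device */
}

static void example_remove(struct platform_device *pdev)
{
	/* undo what probe did; there is nothing left to return */
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,
	.driver		= {
		.name	= "example",
	},
};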
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 3a6763c5e8b3..fcf8485f3446 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2593,7 +2593,7 @@ out_out:
return err;
}
-static int sbmac_remove(struct platform_device *pldev)
+static void sbmac_remove(struct platform_device *pldev)
{
struct net_device *dev = platform_get_drvdata(pldev);
struct sbmac_softc *sc = netdev_priv(dev);
@@ -2604,13 +2604,11 @@ static int sbmac_remove(struct platform_device *pldev)
mdiobus_free(sc->mii_bus);
iounmap(sc->sbm_base);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver sbmac_driver = {
.probe = sbmac_probe,
- .remove = sbmac_remove,
+ .remove_new = sbmac_remove,
.driver = {
.name = sbmac_string,
},
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 14b311196b8f..59896bbf9e73 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6314,6 +6314,46 @@ err_out:
return -EOPNOTSUPP;
}
+static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
+ struct skb_shared_hwtstamps *timestamp)
+{
+ memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
+ timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
+ tp->ptp_adjust);
+}
+
+static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
+{
+ *hwclock = tr32(TG3_TX_TSTAMP_LSB);
+ *hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+}
+
+static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
+{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ struct skb_shared_hwtstamps timestamp;
+ u64 hwclock;
+
+ if (tp->ptp_txts_retrycnt > 2)
+ goto done;
+
+ tg3_read_tx_tstamp(tp, &hwclock);
+
+ if (hwclock != tp->pre_tx_ts) {
+ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+ skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
+ goto done;
+ }
+ tp->ptp_txts_retrycnt++;
+ return HZ / 10;
+done:
+ dev_consume_skb_any(tp->tx_tstamp_skb);
+ tp->tx_tstamp_skb = NULL;
+ tp->ptp_txts_retrycnt = 0;
+ tp->pre_tx_ts = 0;
+ return -1;
+}
+
static const struct ptp_clock_info tg3_ptp_caps = {
.owner = THIS_MODULE,
.name = "tg3 clock",
@@ -6325,19 +6365,12 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.pps = 0,
.adjfine = tg3_ptp_adjfine,
.adjtime = tg3_ptp_adjtime,
+ .do_aux_work = tg3_ptp_ts_aux_work,
.gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
-static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
- struct skb_shared_hwtstamps *timestamp)
-{
- memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
- timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
- tp->ptp_adjust);
-}
-
/* tp->lock must be held */
static void tg3_ptp_init(struct tg3 *tp)
{
@@ -6368,6 +6401,8 @@ static void tg3_ptp_fini(struct tg3 *tp)
ptp_clock_unregister(tp->ptp_clock);
tp->ptp_clock = NULL;
tp->ptp_adjust = 0;
+ dev_consume_skb_any(tp->tx_tstamp_skb);
+ tp->tx_tstamp_skb = NULL;
}
static inline int tg3_irq_sync(struct tg3 *tp)
@@ -6538,6 +6573,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
while (sw_idx != hw_idx) {
struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+ bool complete_skb_later = false;
struct sk_buff *skb = ri->skb;
int i, tx_bug = 0;
@@ -6548,12 +6584,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
struct skb_shared_hwtstamps timestamp;
- u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
- hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
-
- tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+ u64 hwclock;
- skb_tstamp_tx(skb, &timestamp);
+ tg3_read_tx_tstamp(tp, &hwclock);
+ if (hwclock != tp->pre_tx_ts) {
+ tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+ skb_tstamp_tx(skb, &timestamp);
+ tp->pre_tx_ts = 0;
+ } else {
+ tp->tx_tstamp_skb = skb;
+ complete_skb_later = true;
+ }
}
dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping),
@@ -6591,7 +6632,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
pkts_compl++;
bytes_compl += skb->len;
- dev_consume_skb_any(skb);
+ if (!complete_skb_later)
+ dev_consume_skb_any(skb);
+ else
+ ptp_schedule_worker(tp->ptp_clock, 0);
if (unlikely(tx_bug)) {
tg3_tx_recover(tp);
@@ -8028,8 +8072,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
tg3_flag(tp, TX_TSTAMP_EN)) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- base_flags |= TXD_FLAG_HWTSTAMP;
+ tg3_full_lock(tp, 0);
+ if (!tp->pre_tx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ base_flags |= TXD_FLAG_HWTSTAMP;
+ tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
+ }
+ tg3_full_unlock(tp);
}
len = skb_headlen(skb);
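Taken together, the tg3 changes above add a deferred TX timestamp path; summarising the flow as implemented in the hunks:

/* tg3_start_xmit() snapshots the TX timestamp register into pre_tx_ts
 * before sending a packet that asked for a hardware timestamp (only one
 * such packet may be in flight at a time).  On completion, tg3_tx()
 * rereads the register: if the value changed, the timestamp is ready and
 * delivered immediately; otherwise the skb is parked in tx_tstamp_skb
 * and ptp_schedule_worker() kicks tg3_ptp_ts_aux_work(), which re-polls
 * every HZ/10 jiffies and gives up once the retry count exceeds 2,
 * consuming the parked skb either way.
 */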
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1000c894064f..ae5c01bd1110 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -3190,6 +3190,7 @@ struct tg3 {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
s64 ptp_adjust;
+ u8 ptp_txts_retrycnt;
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
@@ -3372,6 +3373,8 @@ struct tg3 {
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
struct work_struct reset_task;
+ struct sk_buff *tx_tstamp_skb;
+ u64 pre_tx_ts;
int nvram_lock_cnt;
u32 nvram_size;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b07522ac3e74..9c80ab07a735 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2839,7 +2839,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strscpy_pad(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
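The strncpy() conversions here and in the liquidio hunks below share one rationale; in short:

/* strscpy() always NUL-terminates the destination and returns -E2BIG on
 * truncation, whereas strncpy() can leave the buffer unterminated when
 * the source fills it.  strscpy_pad() additionally zero-fills the rest
 * of the buffer, preserving strncpy()'s padding where callers relied on
 * it.
 */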
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index b940dcd3ace6..cebae0f418f2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5156,7 +5156,7 @@ err_disable_clocks:
return err;
}
-static int macb_remove(struct platform_device *pdev)
+static void macb_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct macb *bp;
@@ -5181,8 +5181,6 @@ static int macb_remove(struct platform_device *pdev)
phylink_destroy(bp->phylink);
free_netdev(dev);
}
-
- return 0;
}
static int __maybe_unused macb_suspend(struct device *dev)
@@ -5398,7 +5396,7 @@ static const struct dev_pm_ops macb_pm_ops = {
static struct platform_driver macb_driver = {
.probe = macb_probe,
- .remove = macb_remove,
+ .remove_new = macb_remove,
.driver = {
.name = "macb",
.of_match_table = of_match_ptr(macb_dt_ids),
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index f4f87dfa9687..5e97f1e4e38e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1820,7 +1820,7 @@ err_alloc:
* changes the link status, releases the DMA descriptor rings,
* unregisters the MDIO bus and unmaps the allocated memory.
*/
-static int xgmac_remove(struct platform_device *pdev)
+static void xgmac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct xgmac_priv *priv = netdev_priv(ndev);
@@ -1840,8 +1840,6 @@ static int xgmac_remove(struct platform_device *pdev)
release_mem_region(res->start, resource_size(res));
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1921,7 +1919,7 @@ static struct platform_driver xgmac_driver = {
.pm = &xgmac_pm_ops,
},
.probe = xgmac_probe,
- .remove = xgmac_remove,
+ .remove_new = xgmac_remove,
};
module_platform_driver(xgmac_driver);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 9d56181a301f..d3e07b6ed5e1 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -442,10 +442,11 @@ lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
oct = lio->oct_dev;
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strcpy(drvinfo->driver, "liquidio");
- strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
- ETHTOOL_FWVERS_LEN);
- strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ strscpy(drvinfo->driver, "liquidio", sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(oct->pci_dev),
+ sizeof(drvinfo->bus_info));
}
static void
@@ -458,10 +459,11 @@ lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
oct = lio->oct_dev;
memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
- strcpy(drvinfo->driver, "liquidio_vf");
- strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
- ETHTOOL_FWVERS_LEN);
- strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+ strscpy(drvinfo->driver, "liquidio_vf", sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(oct->pci_dev),
+ sizeof(drvinfo->bus_info));
}
static int
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 100daadbea2a..34f02a8ec2ca 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1689,7 +1689,7 @@ static int load_firmware(struct octeon_device *oct)
if (fw_type_is_auto()) {
tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
- strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
+ strscpy_pad(fw_type, tmp_fw_type, sizeof(fw_type));
} else {
tmp_fw_type = fw_type;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index 600de587d7a9..aa6c0dfb6f1c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -638,7 +638,8 @@ lio_vf_rep_netdev_event(struct notifier_block *nb,
memset(&rep_cfg, 0, sizeof(rep_cfg));
rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
rep_cfg.ifidx = vf_rep->ifidx;
- strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);
+ strscpy(rep_cfg.rep_name.name, ndev->name,
+ sizeof(rep_cfg.rep_name.name));
ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
sizeof(rep_cfg), NULL, 0);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 364f4f912dc2..6b6cb73482d7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1217,10 +1217,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
goto core_drv_init_err;
}
- strncpy(app_name,
+ strscpy(app_name,
get_oct_app_string(
(u32)recv_pkt->rh.r_core_drv_init.app_mode),
- sizeof(app_name) - 1);
+ sizeof(app_name));
oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
oct->fw_info.max_nic_ports =
@@ -1257,9 +1257,10 @@ int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
memcpy(cs, get_rbd(
recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));
- strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
- strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
- OCT_SERIAL_LEN);
+ strscpy(oct->boardinfo.name, cs->boardname,
+ sizeof(oct->boardinfo.name));
+ strscpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ sizeof(oct->boardinfo.serial_number));
octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index edde0b8fa49c..007d4b06819e 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1521,7 +1521,7 @@ err:
return result;
}
-static int octeon_mgmt_remove(struct platform_device *pdev)
+static void octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct octeon_mgmt *p = netdev_priv(netdev);
@@ -1529,7 +1529,6 @@ static int octeon_mgmt_remove(struct platform_device *pdev)
unregister_netdev(netdev);
of_node_put(p->phy_np);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id octeon_mgmt_match[] = {
@@ -1546,7 +1545,7 @@ static struct platform_driver octeon_mgmt_driver = {
.of_match_table = octeon_mgmt_match,
},
.probe = octeon_mgmt_probe,
- .remove = octeon_mgmt_remove,
+ .remove_new = octeon_mgmt_remove,
};
module_platform_driver(octeon_mgmt_driver);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index ea75f275023f..646ca0bc25bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -76,7 +76,7 @@ struct l2t_data {
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct rcu_head rcu_head; /* to handle rcu cleanup */
- struct l2t_entry l2tab[];
+ struct l2t_entry l2tab[] __counted_by(nentries);
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
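__counted_by(), added to this header and to several cxgb4 headers below, is an annotation rather than a behavioural change; roughly:

/* __counted_by(nentries) tells the compiler and the kernel's fortified
 * accessors that the flexible array holds 'nentries' elements, so
 * out-of-bounds indexing can be flagged at compile time or, with
 * CONFIG_FORTIFY_SOURCE / CONFIG_UBSAN_BOUNDS, at run time.  The counter
 * member must be set before the array is used.
 */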
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 2e9a74fe0970..6268f96cb4aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2501,14 +2501,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct napi_struct *napi)
-{
- return test_bit(NAPI_STATE_SCHED, &napi->state);
-}
-
/**
* process_pure_responses - process pure responses from a response queue
* @adap: the adapter
@@ -2674,12 +2666,7 @@ static int rspq_check_napi(struct sge_qset *qs)
{
struct sge_rspq *q = &qs->rspq;
- if (!napi_is_scheduled(&qs->napi) &&
- is_new_response(&q->desc[q->cidx], q)) {
- napi_schedule(&qs->napi);
- return 1;
- }
- return 0;
+ return is_new_response(&q->desc[q->cidx], q) && napi_schedule(&qs->napi);
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
index 290c1058069a..847c7fc2bbd9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h
@@ -29,7 +29,7 @@ struct clip_tbl {
atomic_t nfree;
struct list_head ce_free_head;
void *cl_list;
- struct list_head hash_list[];
+ struct list_head hash_list[] __counted_by(clipt_size);
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
index f59dd4b2ae6f..9050568a034c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32_parse.h
@@ -331,6 +331,6 @@ struct cxgb4_link {
struct cxgb4_tc_u32_table {
unsigned int size; /* number of entries in table */
- struct cxgb4_link table[]; /* Jump table */
+ struct cxgb4_link table[] __counted_by(size); /* Jump table */
};
#endif /* __CXGB4_TC_U32_PARSE_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index a10a6862a9a4..1e5f5b1a22a6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -59,7 +59,7 @@ struct l2t_data {
rwlock_t lock;
atomic_t nfree; /* number of free entries */
struct l2t_entry *rover; /* starting point for next allocation */
- struct l2t_entry l2tab[]; /* MUST BE LAST */
+ struct l2t_entry l2tab[] __counted_by(l2t_size); /* MUST BE LAST */
};
static inline unsigned int vlan_prio(const struct l2t_entry *e)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.h b/drivers/net/ethernet/chelsio/cxgb4/sched.h
index 5f8b871d79af..6b3c778815f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.h
@@ -82,7 +82,7 @@ struct sched_class {
struct sched_table { /* per port scheduling table */
u8 sched_size;
- struct sched_class tab[];
+ struct sched_class tab[] __counted_by(sched_size);
};
static inline bool can_sched(struct net_device *dev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 98dd78551d89..b5ff2e1a9975 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -4261,7 +4261,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
if (fl_starving(adap, fl)) {
rxq = container_of(fl, struct sge_eth_rxq, fl);
- if (napi_reschedule(&rxq->rspq.napi))
+ if (napi_schedule(&rxq->rspq.napi))
fl->starving++;
else
set_bit(id, s->starving_fl);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.h b/drivers/net/ethernet/chelsio/cxgb4/smt.h
index 541249d78914..109c1dff563a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/smt.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/smt.h
@@ -66,7 +66,7 @@ struct smt_entry {
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
- struct smt_entry smtab[];
+ struct smt_entry smtab[] __counted_by(smt_size);
};
struct smt_data *t4_init_smt(void);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 2d0cf76fb3c5..5b1d746e6563 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2094,7 +2094,7 @@ static void sge_rx_timer_cb(struct timer_list *t)
struct sge_eth_rxq *rxq;
rxq = container_of(fl, struct sge_eth_rxq, fl);
- if (napi_reschedule(&rxq->rspq.napi))
+ if (napi_schedule(&rxq->rspq.napi))
fl->starving++;
else
set_bit(id, s->starving_fl);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index bcdc7fc2f427..6482728794dd 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -361,9 +361,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
struct tls_context *tls_ctx,
enum tls_offload_ctx_dir direction)
{
- struct chcr_ktls_ofld_ctx_tx *tx_ctx =
- chcr_get_ktls_tx_context(tls_ctx);
- struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
+ struct chcr_ktls_info *tx_info = chcr_get_ktls_tx_info(tls_ctx);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_uld_ctx *u_ctx;
@@ -396,7 +394,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
- tx_ctx->chcr_info = NULL;
+ chcr_set_ktls_tx_info(tls_ctx, NULL);
/* release module refcount */
module_put(THIS_MODULE);
}
@@ -417,7 +415,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
@@ -427,8 +424,6 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
u8 daaddr[16];
int ret = -1;
- tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
-
pi = netdev_priv(netdev);
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
@@ -440,7 +435,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto out;
}
- if (tx_ctx->chcr_info)
+ if (chcr_get_ktls_tx_info(tls_ctx))
goto out;
if (u_ctx && u_ctx->detach)
@@ -566,7 +561,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
goto free_tid;
atomic64_inc(&port_stats->ktls_tx_ctx);
- tx_ctx->chcr_info = tx_info;
+ chcr_set_ktls_tx_info(tls_ctx, tx_info);
return 0;
@@ -647,7 +642,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tls_offload_context_tx *tx_ctx;
struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
struct tls_context *tls_ctx;
@@ -686,7 +681,7 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
/* Adding tid */
tls_ctx = tls_get_ctx(tx_info->sk);
- tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ tx_ctx = tls_offload_ctx_tx(tls_ctx);
u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (u_ctx) {
ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
@@ -1924,7 +1919,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
u32 tls_end_offset, tcp_seq, skb_data_len, skb_offset;
struct ch_ktls_port_stats_debug *port_stats;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tls_offload_context_tx *tx_ctx;
struct ch_ktls_stats_debug *stats;
struct tcphdr *th = tcp_hdr(skb);
int data_len, qidx, ret = 0, mss;
@@ -1944,6 +1939,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_is_gso(skb) ? skb_shinfo(skb)->gso_size : data_len;
tls_ctx = tls_get_ctx(skb->sk);
+ tx_ctx = tls_offload_ctx_tx(tls_ctx);
tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
/* Don't quit on NULL: if tls_device_down is running in parallel,
* netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
@@ -1952,8 +1948,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(tls_netdev && tls_netdev != dev))
goto out;
- tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
- tx_info = tx_ctx->chcr_info;
+ tx_info = chcr_get_ktls_tx_info(tls_ctx);
if (unlikely(!tx_info))
goto out;
@@ -1979,19 +1974,19 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
* we will send the complete record again.
*/
- spin_lock_irqsave(&tx_ctx->base.lock, flags);
+ spin_lock_irqsave(&tx_ctx->lock, flags);
do {
cxgb4_reclaim_completed_tx(adap, &q->q, true);
/* fetch the tls record */
- record = tls_get_record(&tx_ctx->base, tcp_seq,
+ record = tls_get_record(tx_ctx, tcp_seq,
&tx_info->record_no);
/* By the time packet reached to us, ACK is received, and record
* won't be found in that case, handle it gracefully.
*/
if (unlikely(!record)) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
goto out;
}
@@ -2015,7 +2010,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
tls_end_offset !=
record->len);
if (ret) {
- spin_unlock_irqrestore(&tx_ctx->base.lock,
+ spin_unlock_irqrestore(&tx_ctx->lock,
flags);
goto out;
}
@@ -2046,7 +2041,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* free the refcount taken earlier */
if (tls_end_offset < data_len)
dev_kfree_skb_any(skb);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
goto out;
}
@@ -2082,7 +2077,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
/* if any failure, come out from the loop. */
if (ret) {
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
if (th->fin)
dev_kfree_skb_any(skb);
@@ -2097,7 +2092,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
} while (data_len > 0);
- spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
+ spin_unlock_irqrestore(&tx_ctx->lock, flags);
atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
atomic64_add(skb_data_len, &port_stats->ktls_tx_encrypted_bytes);
@@ -2185,17 +2180,17 @@ static void clear_conn_resources(struct chcr_ktls_info *tx_info)
static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
{
struct ch_ktls_port_stats_debug *port_stats;
- struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct tls_offload_context_tx *tx_ctx;
struct chcr_ktls_info *tx_info;
unsigned long index;
xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
- tx_info = tx_ctx->chcr_info;
+ tx_info = __chcr_get_ktls_tx_info(tx_ctx);
clear_conn_resources(tx_info);
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
- tx_ctx->chcr_info = NULL;
+ memset(tx_ctx->driver_state, 0, TLS_DRIVER_STATE_SIZE_TX);
/* release module refcount */
module_put(THIS_MODULE);
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 10572dc55365..dbbba92bf540 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -67,8 +67,7 @@ struct chcr_ktls_info {
bool pending_close;
};
-struct chcr_ktls_ofld_ctx_tx {
- struct tls_offload_context_tx base;
+struct chcr_ktls_ctx_tx {
struct chcr_ktls_info *chcr_info;
};
@@ -79,14 +78,33 @@ struct chcr_ktls_uld_ctx {
bool detach;
};
-static inline struct chcr_ktls_ofld_ctx_tx *
-chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
+static inline struct chcr_ktls_info *
+__chcr_get_ktls_tx_info(struct tls_offload_context_tx *octx)
{
- BUILD_BUG_ON(sizeof(struct chcr_ktls_ofld_ctx_tx) >
- TLS_OFFLOAD_CONTEXT_SIZE_TX);
- return container_of(tls_offload_ctx_tx(tls_ctx),
- struct chcr_ktls_ofld_ctx_tx,
- base);
+ struct chcr_ktls_ctx_tx *priv_ctx;
+
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
+ priv_ctx = (struct chcr_ktls_ctx_tx *)octx->driver_state;
+ return priv_ctx->chcr_info;
+}
+
+static inline struct chcr_ktls_info *
+chcr_get_ktls_tx_info(struct tls_context *tls_ctx)
+{
+ struct chcr_ktls_ctx_tx *priv_ctx;
+
+ BUILD_BUG_ON(sizeof(struct chcr_ktls_ctx_tx) > TLS_DRIVER_STATE_SIZE_TX);
+ priv_ctx = (struct chcr_ktls_ctx_tx *)__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ return priv_ctx->chcr_info;
+}
+
+static inline void
+chcr_set_ktls_tx_info(struct tls_context *tls_ctx, struct chcr_ktls_info *chcr_info)
+{
+ struct chcr_ktls_ctx_tx *priv_ctx;
+
+ priv_ctx = __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX);
+ priv_ctx->chcr_info = chcr_info;
}
static inline int chcr_get_first_rx_qid(struct adapter *adap)
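The net effect of the chcr_ktls rework above, in brief:

/* The old chcr_ktls_ofld_ctx_tx wrapper embedded tls_offload_context_tx
 * and was recovered with container_of().  The driver-private pointer now
 * lives in the TLS core's per-direction driver_state area (bounded by
 * TLS_DRIVER_STATE_SIZE_TX) and is reached through
 * __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX) or straight from a
 * tls_offload_context_tx, so the core offload context itself is used for
 * record lookup and locking in chcr_ktls_xmit().
 */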
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 7750702900fa..6f6525983130 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -2259,7 +2259,7 @@ static void chtls_rx_ack(struct sock *sk, struct sk_buff *skb)
if (tp->snd_una != snd_una) {
tp->snd_una = snd_una;
- tp->rcv_tstamp = tcp_time_stamp(tp);
+ tp->rcv_tstamp = tcp_jiffies32;
if (tp->snd_una == tp->snd_nxt &&
!csk_flag_nochk(csk, CSK_TX_FAILOVER))
csk_reset_flag(csk, CSK_TX_WAIT_IDLE);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d323c5c23521..0a21a10a791c 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1879,7 +1879,7 @@ free:
return err;
}
-static int cs89x0_platform_remove(struct platform_device *pdev)
+static void cs89x0_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1889,7 +1889,6 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
*/
unregister_netdev(dev);
free_netdev(dev);
- return 0;
}
static const struct of_device_id __maybe_unused cs89x0_match[] = {
@@ -1904,7 +1903,7 @@ static struct platform_driver cs89x0_driver = {
.name = DRV_NAME,
.of_match_table = of_match_ptr(cs89x0_match),
},
- .remove = cs89x0_platform_remove,
+ .remove_new = cs89x0_platform_remove,
};
module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 8627ab19d470..1c2a540db13d 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -757,7 +757,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
}
-static int ep93xx_eth_remove(struct platform_device *pdev)
+static void ep93xx_eth_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct ep93xx_priv *ep;
@@ -765,7 +765,7 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
dev = platform_get_drvdata(pdev);
if (dev == NULL)
- return 0;
+ return;
ep = netdev_priv(dev);
@@ -782,8 +782,6 @@ static int ep93xx_eth_remove(struct platform_device *pdev)
}
free_netdev(dev);
-
- return 0;
}
static int ep93xx_eth_probe(struct platform_device *pdev)
@@ -862,7 +860,7 @@ err_out:
static struct platform_driver ep93xx_eth_driver = {
.probe = ep93xx_eth_probe,
- .remove = ep93xx_eth_remove,
+ .remove_new = ep93xx_eth_remove,
.driver = {
.name = "ep93xx-eth",
},
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 21a70b1f0ac5..887876f35f10 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -556,19 +556,18 @@ static int set_mac_address(struct net_device *dev, void *addr)
MODULE_LICENSE("GPL");
-static int mac89x0_device_remove(struct platform_device *pdev)
+static void mac89x0_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
nubus_writew(0, dev->base_addr + ADD_PORT);
free_netdev(dev);
- return 0;
}
static struct platform_driver mac89x0_platform_driver = {
.probe = mac89x0_device_probe,
- .remove = mac89x0_device_remove,
+ .remove_new = mac89x0_device_remove,
.driver = {
.name = "mac89x0",
},
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index a8b9d1a3e4d5..5423fe26b4ef 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2518,13 +2518,11 @@ unprepare:
return ret;
}
-static int gemini_ethernet_port_remove(struct platform_device *pdev)
+static void gemini_ethernet_port_remove(struct platform_device *pdev)
{
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
-
- return 0;
}
static const struct of_device_id gemini_ethernet_port_of_match[] = {
@@ -2541,7 +2539,7 @@ static struct platform_driver gemini_ethernet_port_driver = {
.of_match_table = gemini_ethernet_port_of_match,
},
.probe = gemini_ethernet_port_probe,
- .remove = gemini_ethernet_port_remove,
+ .remove_new = gemini_ethernet_port_remove,
};
static int gemini_ethernet_probe(struct platform_device *pdev)
@@ -2583,14 +2581,12 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
return devm_of_platform_populate(dev);
}
-static int gemini_ethernet_remove(struct platform_device *pdev)
+static void gemini_ethernet_remove(struct platform_device *pdev)
{
struct gemini_ethernet *geth = platform_get_drvdata(pdev);
geth_cleanup_freeq(geth);
geth->initialized = false;
-
- return 0;
}
static const struct of_device_id gemini_ethernet_of_match[] = {
@@ -2607,7 +2603,7 @@ static struct platform_driver gemini_ethernet_driver = {
.of_match_table = gemini_ethernet_of_match,
},
.probe = gemini_ethernet_probe,
- .remove = gemini_ethernet_remove,
+ .remove_new = gemini_ethernet_remove,
};
static int __init gemini_ethernet_module_init(void)
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 05a89ab6766c..150cc94ae9f8 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1770,8 +1770,7 @@ static const struct dev_pm_ops dm9000_drv_pm_ops = {
.resume = dm9000_drv_resume,
};
-static int
-dm9000_drv_remove(struct platform_device *pdev)
+static void dm9000_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct board_info *dm = to_dm9000_board(ndev);
@@ -1783,7 +1782,6 @@ dm9000_drv_remove(struct platform_device *pdev)
regulator_disable(dm->power_supply);
dev_dbg(&pdev->dev, "released and freed device\n");
- return 0;
}
#ifdef CONFIG_OF
@@ -1801,7 +1799,7 @@ static struct platform_driver dm9000_driver = {
.of_match_table = of_match_ptr(dm9000_of_matches),
},
.probe = dm9000_probe,
- .remove = dm9000_drv_remove,
+ .remove_new = dm9000_drv_remove,
};
module_platform_driver(dm9000_driver);
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 0ed598dc7569..bd786dfbc066 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -381,7 +381,7 @@ struct mediatable {
unsigned has_reset:6;
u32 csr15dir;
u32 csr15val; /* 21143 NWay setting. */
- struct medialeaf mleaf[];
+ struct medialeaf mleaf[] __counted_by(leafcount);
};
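[Editor's note] The mleaf[] change is part of the tree-wide __counted_by() annotation work: tying a flexible array member to the struct field that holds its element count lets FORTIFY_SOURCE and UBSAN bounds checking validate accesses at run time. The count field has to be assigned before the array is indexed. A minimal sketch with hypothetical names; the same annotation is applied below to enetc, enetc_qos and the HNS PPE/RCB structures:

struct foo_table {
	u8 nr_entries;		/* must be set before entries[] is used */
	struct foo_entry entries[] __counted_by(nr_entries);
};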
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 151ca9573be9..2a18df3605f1 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -841,7 +841,7 @@ err_out_free_dev:
return err;
}
-static int dnet_remove(struct platform_device *pdev)
+static void dnet_remove(struct platform_device *pdev)
{
struct net_device *dev;
@@ -859,13 +859,11 @@ static int dnet_remove(struct platform_device *pdev)
free_irq(dev->irq, dev);
free_netdev(dev);
}
-
- return 0;
}
static struct platform_driver dnet_driver = {
.probe = dnet_probe,
- .remove = dnet_remove,
+ .remove_new = dnet_remove,
.driver = {
.name = "dnet",
},
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 6e14c918e3fb..f188fba021a6 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -143,7 +143,7 @@ struct tsnep_rx {
struct tsnep_queue {
struct tsnep_adapter *adapter;
- char name[IFNAMSIZ + 9];
+ char name[IFNAMSIZ + 16];
struct tsnep_tx *tx;
struct tsnep_rx *rx;
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
index 55e1caf193a6..64c97eb66f67 100644
--- a/drivers/net/ethernet/engleder/tsnep_hw.h
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -181,6 +181,8 @@ struct tsnep_gcl_operation {
#define TSNEP_DESC_SIZE 256
#define TSNEP_DESC_SIZE_DATA_AFTER 2048
#define TSNEP_DESC_OFFSET 128
+#define TSNEP_DESC_SIZE_DATA_AFTER_INLINE (64 - sizeof(struct tsnep_tx_desc) + \
+ sizeof_field(struct tsnep_tx_desc, tx))
#define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000
#define TSNEP_DESC_OWNER_COUNTER_SHIFT 30
#define TSNEP_DESC_LENGTH_MASK 0x00003FFF
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 8b992dc9bb52..df40c720e7b2 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -51,12 +51,22 @@
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
-#define TSNEP_TX_TYPE_SKB BIT(0)
-#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
-#define TSNEP_TX_TYPE_XDP_TX BIT(2)
-#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
-#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
-#define TSNEP_TX_TYPE_XSK BIT(4)
+/* mapping type */
+#define TSNEP_TX_TYPE_MAP BIT(0)
+#define TSNEP_TX_TYPE_MAP_PAGE BIT(1)
+#define TSNEP_TX_TYPE_INLINE BIT(2)
+/* buffer type */
+#define TSNEP_TX_TYPE_SKB BIT(8)
+#define TSNEP_TX_TYPE_SKB_MAP (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_MAP)
+#define TSNEP_TX_TYPE_SKB_INLINE (TSNEP_TX_TYPE_SKB | TSNEP_TX_TYPE_INLINE)
+#define TSNEP_TX_TYPE_SKB_FRAG BIT(9)
+#define TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_MAP_PAGE)
+#define TSNEP_TX_TYPE_SKB_FRAG_INLINE (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_INLINE)
+#define TSNEP_TX_TYPE_XDP_TX BIT(10)
+#define TSNEP_TX_TYPE_XDP_NDO BIT(11)
+#define TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE (TSNEP_TX_TYPE_XDP_NDO | TSNEP_TX_TYPE_MAP_PAGE)
+#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO)
+#define TSNEP_TX_TYPE_XSK BIT(12)
#define TSNEP_XDP_TX BIT(0)
#define TSNEP_XDP_REDIRECT BIT(1)
@@ -416,6 +426,8 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
entry->desc->more_properties =
__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);
+ if (entry->type & TSNEP_TX_TYPE_INLINE)
+ entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG;
/* descriptor properties shall be written last, because valid data is
* signaled there
@@ -433,39 +445,79 @@ static int tsnep_tx_desc_available(struct tsnep_tx *tx)
return tx->read - tx->write - 1;
}
+static int tsnep_tx_map_frag(skb_frag_t *frag, struct tsnep_tx_entry *entry,
+ struct device *dmadev, dma_addr_t *dma)
+{
+ unsigned int len;
+ int mapped;
+
+ len = skb_frag_size(frag);
+ if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
+ *dma = skb_frag_dma_map(dmadev, frag, 0, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, *dma))
+ return -ENOMEM;
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE;
+ mapped = 1;
+ } else {
+ void *fragdata = skb_frag_address_safe(frag);
+
+ if (likely(fragdata)) {
+ memcpy(&entry->desc->tx, fragdata, len);
+ } else {
+ struct page *page = skb_frag_page(frag);
+
+ fragdata = kmap_local_page(page);
+ memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag),
+ len);
+ kunmap_local(fragdata);
+ }
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE;
+ mapped = 0;
+ }
+
+ return mapped;
+}
+
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
unsigned int len;
- dma_addr_t dma;
int map_len = 0;
- int i;
+ dma_addr_t dma;
+ int i, mapped;
for (i = 0; i < count; i++) {
entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK];
if (!i) {
len = skb_headlen(skb);
- dma = dma_map_single(dmadev, skb->data, len,
- DMA_TO_DEVICE);
-
- entry->type = TSNEP_TX_TYPE_SKB;
+ if (likely(len > TSNEP_DESC_SIZE_DATA_AFTER_INLINE)) {
+ dma = dma_map_single(dmadev, skb->data, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+ entry->type = TSNEP_TX_TYPE_SKB_MAP;
+ mapped = 1;
+ } else {
+ memcpy(&entry->desc->tx, skb->data, len);
+ entry->type = TSNEP_TX_TYPE_SKB_INLINE;
+ mapped = 0;
+ }
} else {
- len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
- dma = skb_frag_dma_map(dmadev,
- &skb_shinfo(skb)->frags[i - 1],
- 0, len, DMA_TO_DEVICE);
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
- entry->type = TSNEP_TX_TYPE_SKB_FRAG;
+ len = skb_frag_size(frag);
+ mapped = tsnep_tx_map_frag(frag, entry, dmadev, &dma);
+ if (mapped < 0)
+ return mapped;
}
- if (dma_mapping_error(dmadev, dma))
- return -ENOMEM;
entry->len = len;
- dma_unmap_addr_set(entry, dma, dma);
-
- entry->desc->tx = __cpu_to_le64(dma);
+ if (likely(mapped)) {
+ dma_unmap_addr_set(entry, dma, dma);
+ entry->desc->tx = __cpu_to_le64(dma);
+ }
map_len += len;
}
@@ -484,13 +536,12 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
entry = &tx->entry[(index + i) & TSNEP_RING_MASK];
if (entry->len) {
- if (entry->type & TSNEP_TX_TYPE_SKB)
+ if (entry->type & TSNEP_TX_TYPE_MAP)
dma_unmap_single(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
- else if (entry->type &
- (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
+ else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE)
dma_unmap_page(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
@@ -586,7 +637,7 @@ static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
if (dma_mapping_error(dmadev, dma))
return -ENOMEM;
- entry->type = TSNEP_TX_TYPE_XDP_NDO;
+ entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE;
} else {
page = unlikely(frag) ? skb_frag_page(frag) :
virt_to_page(xdpf->data);
@@ -1779,14 +1830,14 @@ static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
dev = queue->adapter;
} else {
if (queue->tx && queue->rx)
- sprintf(queue->name, "%s-txrx-%d", name,
- queue->rx->queue_index);
+ snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d",
+ name, queue->rx->queue_index);
else if (queue->tx)
- sprintf(queue->name, "%s-tx-%d", name,
- queue->tx->queue_index);
+ snprintf(queue->name, sizeof(queue->name), "%s-tx-%d",
+ name, queue->tx->queue_index);
else
- sprintf(queue->name, "%s-rx-%d", name,
- queue->rx->queue_index);
+ snprintf(queue->name, sizeof(queue->name), "%s-rx-%d",
+ name, queue->rx->queue_index);
handler = tsnep_irq_txrx;
dev = queue;
}
@@ -2587,7 +2638,7 @@ mdio_init_failed:
return retval;
}
-static int tsnep_remove(struct platform_device *pdev)
+static void tsnep_remove(struct platform_device *pdev)
{
struct tsnep_adapter *adapter = platform_get_drvdata(pdev);
@@ -2603,8 +2654,6 @@ static int tsnep_remove(struct platform_device *pdev)
mdiobus_unregister(adapter->mdiobus);
tsnep_disable_irq(adapter, ECM_INT_ALL);
-
- return 0;
}
static const struct of_device_id tsnep_of_match[] = {
@@ -2619,7 +2668,7 @@ static struct platform_driver tsnep_driver = {
.of_match_table = tsnep_of_match,
},
.probe = tsnep_probe,
- .remove = tsnep_remove,
+ .remove_new = tsnep_remove,
};
module_platform_driver(tsnep_driver);
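[Editor's note] Two things ride along with the tsnep inline-TX rework above: fragments no longer than TSNEP_DESC_SIZE_DATA_AFTER_INLINE are copied straight into the descriptor instead of being DMA-mapped, and the per-queue IRQ names are now formatted with snprintf() into the enlarged name[IFNAMSIZ + 16] buffer so a maximal interface name plus the "-txrx-%d" suffix can no longer overflow it. A minimal sketch of the bounded-formatting pattern, names illustrative only:

char name[IFNAMSIZ + 16];

/* truncates instead of overflowing when the netdev name is at its maximum length */
snprintf(name, sizeof(name), "%s-txrx-%d", netdev_name(ndev), queue_index);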
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 95cbad198b4b..ad41c9019018 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1254,7 +1254,7 @@ out:
* ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
-static int ethoc_remove(struct platform_device *pdev)
+static void ethoc_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ethoc *priv = netdev_priv(netdev);
@@ -1271,8 +1271,6 @@ static int ethoc_remove(struct platform_device *pdev)
unregister_netdev(netdev);
free_netdev(netdev);
}
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1298,7 +1296,7 @@ MODULE_DEVICE_TABLE(of, ethoc_match);
static struct platform_driver ethoc_driver = {
.probe = ethoc_probe,
- .remove = ethoc_remove,
+ .remove_new = ethoc_remove,
.suspend = ethoc_suspend,
.resume = ethoc_resume,
.driver = {
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index edf000e7bab4..4d7184d46824 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -198,7 +198,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
*/
if (nps_enet_is_tx_pending(priv)) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
- napi_reschedule(napi);
+ napi_schedule(napi);
}
}
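[Editor's note] napi_reschedule() was a thin wrapper around napi_schedule(); since napi_schedule() itself now reports whether it actually scheduled (the gve hunk further down tests its return value directly), the wrapper is being retired and callers switch over. A minimal sketch of the usual poll re-arm pattern, with hypothetical foo_* helpers:

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work = foo_clean_rx(napi, budget);

	if (work < budget && napi_complete_done(napi, work)) {
		/* a late TX event raced with napi_complete_done(): re-arm */
		if (foo_tx_pending(napi))
			napi_schedule(napi);
	}

	return work;
}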
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 9135b918dd49..fddfd1dd5070 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -2012,7 +2012,7 @@ err_alloc_etherdev:
return err;
}
-static int ftgmac100_remove(struct platform_device *pdev)
+static void ftgmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftgmac100 *priv;
@@ -2040,7 +2040,6 @@ static int ftgmac100_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id ftgmac100_of_match[] = {
@@ -2051,7 +2050,7 @@ MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
static struct platform_driver ftgmac100_driver = {
.probe = ftgmac100_probe,
- .remove = ftgmac100_remove,
+ .remove_new = ftgmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftgmac100_of_match,
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 183069581bc0..003bc9a45c65 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1219,7 +1219,7 @@ err_alloc_etherdev:
return err;
}
-static int ftmac100_remove(struct platform_device *pdev)
+static void ftmac100_remove(struct platform_device *pdev)
{
struct net_device *netdev;
struct ftmac100 *priv;
@@ -1234,7 +1234,6 @@ static int ftmac100_remove(struct platform_device *pdev)
netif_napi_del(&priv->napi);
free_netdev(netdev);
- return 0;
}
static const struct of_device_id ftmac100_of_ids[] = {
@@ -1244,7 +1243,7 @@ static const struct of_device_id ftmac100_of_ids[] = {
static struct platform_driver ftmac100_driver = {
.probe = ftmac100_probe,
- .remove = ftmac100_remove,
+ .remove_new = ftmac100_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = ftmac100_of_ids
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 35461165de0d..30bec47bc665 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -1655,7 +1655,7 @@ out:
rx_ring->stats.bytes += rx_byte_cnt;
if (xdp_redirect_frm_cnt)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_tx_frm_cnt)
enetc_update_tx_ring_tail(tx_ring);
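[Editor's note] xdp_do_flush_map() is the older name for xdp_do_flush(); both drain the per-CPU redirect bulk queues once per NAPI poll, and callers are moved to the shorter name here (the fec hunk below makes the same switch). A minimal sketch of the flush-once-per-poll pattern, helpers hypothetical:

static int foo_clean_rx(struct foo_ring *ring, int budget)
{
	int done = 0;
	bool redirected = false;

	while (done < budget && foo_rx_one(ring, &redirected))
		done++;

	if (redirected)
		xdp_do_flush();	/* flush xdp_do_redirect() bulk queues once */

	return done;
}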
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 7439739cd81a..a9c2ff22431c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -297,7 +297,7 @@ struct enetc_int_vector {
char name[ENETC_INT_NAME_MAX];
struct enetc_bdr rx_ring;
- struct enetc_bdr tx_ring[];
+ struct enetc_bdr tx_ring[] __counted_by(count_tx_rings);
} ____cacheline_aligned_in_smp;
struct enetc_cls_rule {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 2513b44056c1..b65da49dd926 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -443,7 +443,7 @@ struct enetc_psfp_gate {
u32 num_entries;
refcount_t refcount;
struct hlist_node node;
- struct action_gate_entry entries[];
+ struct action_gate_entry entries[] __counted_by(num_entries);
};
/* Only enable the green color frame now
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 77c8e9cfb445..032c15b541ff 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -52,11 +52,11 @@
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
@@ -187,65 +187,22 @@ static struct platform_device_id fec_devtype[] = {
.name = DRIVER_NAME,
.driver_data = 0,
}, {
- .name = "imx25-fec",
- .driver_data = (kernel_ulong_t)&fec_imx25_info,
- }, {
- .name = "imx27-fec",
- .driver_data = (kernel_ulong_t)&fec_imx27_info,
- }, {
- .name = "imx28-fec",
- .driver_data = (kernel_ulong_t)&fec_imx28_info,
- }, {
- .name = "imx6q-fec",
- .driver_data = (kernel_ulong_t)&fec_imx6q_info,
- }, {
- .name = "mvf600-fec",
- .driver_data = (kernel_ulong_t)&fec_mvf600_info,
- }, {
- .name = "imx6sx-fec",
- .driver_data = (kernel_ulong_t)&fec_imx6x_info,
- }, {
- .name = "imx6ul-fec",
- .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
- }, {
- .name = "imx8mq-fec",
- .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
- }, {
- .name = "imx8qm-fec",
- .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
- }, {
- .name = "s32v234-fec",
- .driver_data = (kernel_ulong_t)&fec_s32v234_info,
- }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
-enum imx_fec_type {
- IMX25_FEC = 1, /* runs on i.mx25/50/53 */
- IMX27_FEC, /* runs on i.mx27/35/51 */
- IMX28_FEC,
- IMX6Q_FEC,
- MVF600_FEC,
- IMX6SX_FEC,
- IMX6UL_FEC,
- IMX8MQ_FEC,
- IMX8QM_FEC,
- S32V234_FEC,
-};
-
static const struct of_device_id fec_dt_ids[] = {
- { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
- { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
- { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
- { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
- { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
- { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
- { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
- { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
- { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
- { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
+ { .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
+ { .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
+ { .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
+ { .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
+ { .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
+ { .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
+ { .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
+ { .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
+ { .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -1832,7 +1789,7 @@ rx_processing_done:
rxq->bd.cur = bdp;
if (xdp_result & FEC_ENET_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
return pkt_received;
}
@@ -2907,12 +2864,10 @@ static void fec_enet_get_strings(struct net_device *netdev,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
- memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "%s", fec_stats[i].name);
}
for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
- strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ ethtool_sprintf(&data, "%s", fec_xdp_stat_strs[i]);
}
page_pool_ethtool_stats_get_strings(data);
@@ -4292,14 +4247,13 @@ fec_probe(struct platform_device *pdev)
phy_interface_t interface;
struct net_device *ndev;
int i, irq, ret = 0;
- const struct of_device_id *of_id;
static int dev_id;
struct device_node *np = pdev->dev.of_node, *phy_node;
int num_tx_qs;
int num_rx_qs;
char irq_name[8];
int irq_cnt;
- struct fec_devinfo *dev_info;
+ const struct fec_devinfo *dev_info;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -4314,10 +4268,9 @@ fec_probe(struct platform_device *pdev)
/* setup board info structure */
fep = netdev_priv(ndev);
- of_id = of_match_device(fec_dt_ids, &pdev->dev);
- if (of_id)
- pdev->id_entry = of_id->data;
- dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ dev_info = device_get_match_data(&pdev->dev);
+ if (!dev_info)
+ dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
if (dev_info)
fep->quirks = dev_info->quirks;
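[Editor's note] The fec changes above drop the enum-indexed fec_devtype[] indirection: device_get_match_data() returns the OF/ACPI match data directly, and the legacy platform id table is kept only as a fallback, so of_device.h is no longer needed. The ethtool strings loop also switches to ethtool_sprintf(), which copies a string and advances the output pointer in one step. The same of_match_device() -> device_get_match_data() conversion is applied to fs_enet, mii-fec, fsl_pq_mdio and hix5hd2 below. A minimal sketch of the lookup, with a hypothetical foo_devinfo:

static int foo_probe(struct platform_device *pdev)
{
	const struct foo_devinfo *info;

	info = device_get_match_data(&pdev->dev);	/* OF/ACPI match data */
	if (!info && pdev->id_entry)			/* legacy platform id fallback */
		info = (const struct foo_devinfo *)pdev->id_entry->driver_data;
	if (!info)
		return -ENODEV;

	return foo_init(pdev, info);
}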
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 3b75cc543be9..9ba15d3183d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -618,18 +618,17 @@ static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-static void memac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static unsigned long memac_get_caps(struct phylink_config *config,
+ phy_interface_t interface)
{
struct fman_mac *memac = fman_config_to_mac(config)->fman_mac;
unsigned long caps = config->mac_capabilities;
- if (phy_interface_mode_is_rgmii(state->interface) &&
+ if (phy_interface_mode_is_rgmii(interface) &&
memac->rgmii_no_half_duplex)
caps &= ~(MAC_10HD | MAC_100HD);
- phylink_validate_mask_caps(supported, state, caps);
+ return caps;
}
/**
@@ -776,7 +775,7 @@ static void memac_link_down(struct phylink_config *config, unsigned int mode,
}
static const struct phylink_mac_ops memac_mac_ops = {
- .validate = memac_validate,
+ .mac_get_caps = memac_get_caps,
.mac_select_pcs = memac_select_pcs,
.mac_prepare = memac_prepare,
.mac_config = memac_mac_config,
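[Editor's note] memac moves from the legacy phylink .validate hook to .mac_get_caps: rather than masking a supported-modes bitmap itself, the MAC just reports the capabilities it has for a given interface and phylink derives the link modes. A minimal sketch, helpers hypothetical:

static unsigned long foo_get_caps(struct phylink_config *config,
				  phy_interface_t interface)
{
	unsigned long caps = config->mac_capabilities;

	if (phy_interface_mode_is_rgmii(interface) && foo_rgmii_fd_only(config))
		caps &= ~(MAC_10HD | MAC_100HD);	/* no half duplex over RGMII */

	return caps;
}

static const struct phylink_mac_ops foo_mac_ops = {
	.mac_get_caps	= foo_get_caps,
	/* remaining ops as before */
};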
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index a6dfc8807d3d..cf392faa6105 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -35,10 +35,9 @@
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/pgtable.h>
@@ -884,9 +883,9 @@ static const struct ethtool_ops fs_ethtool_ops = {
/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
-#define IS_FEC(match) ((match)->data == &fs_fec_ops)
+#define IS_FEC(ops) ((ops) == &fs_fec_ops)
#else
-#define IS_FEC(match) 0
+#define IS_FEC(ops) 0
#endif
static const struct net_device_ops fs_enet_netdev_ops = {
@@ -903,10 +902,9 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};
-static const struct of_device_id fs_enet_match[];
static int fs_enet_probe(struct platform_device *ofdev)
{
- const struct of_device_id *match;
+ const struct fs_ops *ops;
struct net_device *ndev;
struct fs_enet_private *fep;
struct fs_platform_info *fpi;
@@ -916,15 +914,15 @@ static int fs_enet_probe(struct platform_device *ofdev)
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
- match = of_match_device(fs_enet_match, &ofdev->dev);
- if (!match)
+ ops = device_get_match_data(&ofdev->dev);
+ if (!ops)
return -EINVAL;
fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
if (!fpi)
return -ENOMEM;
- if (!IS_FEC(match)) {
+ if (!IS_FEC(ops)) {
data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4)
goto out_free_fpi;
@@ -986,7 +984,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
fep->dev = &ofdev->dev;
fep->ndev = ndev;
fep->fpi = fpi;
- fep->ops = match->data;
+ fep->ops = ops;
ret = fep->ops->setup_data(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index a1e777a4b75f..7bb69727952a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -30,9 +30,10 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/of_platform.h>
#include <linux/pgtable.h>
#include <asm/irq.h>
@@ -96,20 +97,15 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location,
}
-static const struct of_device_id fs_enet_mdio_fec_match[];
static int fs_enet_mdio_probe(struct platform_device *ofdev)
{
- const struct of_device_id *match;
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
int (*get_bus_freq)(struct device *);
int ret = -ENOMEM, clock, speed;
- match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
- if (!match)
- return -EINVAL;
- get_bus_freq = match->data;
+ get_bus_freq = device_get_match_data(&ofdev->dev);
new_bus = mdiobus_alloc();
if (!new_bus)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index eee675a25b2c..70dd982a5edc 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -19,9 +19,10 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mii.h>
+#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <asm/io.h>
#if IS_ENABLED(CONFIG_UCC_GETH)
@@ -407,8 +408,6 @@ static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev,
static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
- const struct of_device_id *id =
- of_match_device(fsl_pq_mdio_match, &pdev->dev);
const struct fsl_pq_mdio_data *data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
@@ -417,15 +416,12 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
struct mii_bus *new_bus;
int err;
- if (!id) {
+ data = device_get_match_data(&pdev->dev);
+ if (!data) {
dev_err(&pdev->dev, "Failed to match device\n");
return -ENODEV;
}
- data = id->data;
-
- dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);
-
new_bus = mdiobus_alloc_size(sizeof(*priv));
if (!new_bus)
return -ENOMEM;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5704b5f57cd0..276f996f95dc 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -190,7 +190,7 @@ static int gve_alloc_stats_report(struct gve_priv *priv)
rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
priv->rx_cfg.num_queues;
priv->stats_report_len = struct_size(priv->stats_report, stats,
- tx_stats_num + rx_stats_num);
+ size_add(tx_stats_num, rx_stats_num));
priv->stats_report =
dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
&priv->stats_report_bus, GFP_KERNEL);
@@ -281,7 +281,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->rx)
reschedule |= gve_rx_work_pending(block->rx);
- if (reschedule && napi_reschedule(napi))
+ if (reschedule && napi_schedule(napi))
iowrite32be(GVE_IRQ_MASK, irq_doorbell);
}
return work_done;
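[Editor's note] The gve stats_report sizing switches the element count to size_add(): struct_size() already saturates on overflow, and summing the two counts through size_add() (which saturates at SIZE_MAX) instead of plain integer addition keeps the whole computation overflow-safe, so an absurd size fails the allocation cleanly. A minimal sketch, names hypothetical:

size_t len;

len = struct_size(priv->stats_report, stats,
		  size_add(tx_stats_num, rx_stats_num));
priv->stats_report = dma_alloc_coherent(&pdev->dev, len,
					&priv->stats_report_bus, GFP_KERNEL);
if (!priv->stats_report)
	return -ENOMEM;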
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index ecf92a5d56bb..b91e7a06b97f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -1021,7 +1021,7 @@ init_fail:
return ret;
}
-static int hip04_remove(struct platform_device *pdev)
+static void hip04_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hip04_priv *priv = netdev_priv(ndev);
@@ -1035,8 +1035,6 @@ static int hip04_remove(struct platform_device *pdev)
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id hip04_mac_match[] = {
@@ -1048,7 +1046,7 @@ MODULE_DEVICE_TABLE(of, hip04_mac_match);
static struct platform_driver hip04_mac_driver = {
.probe = hip04_mac_probe,
- .remove = hip04_remove,
+ .remove_new = hip04_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = hip04_mac_match,
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index cb7b0293fe85..2406263c9dd3 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -893,7 +893,7 @@ out_free_netdev:
return ret;
}
-static int hisi_femac_drv_remove(struct platform_device *pdev)
+static void hisi_femac_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hisi_femac_priv *priv = netdev_priv(ndev);
@@ -904,8 +904,6 @@ static int hisi_femac_drv_remove(struct platform_device *pdev)
phy_disconnect(ndev->phydev);
clk_disable_unprepare(priv->clk);
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -961,7 +959,7 @@ static struct platform_driver hisi_femac_driver = {
.of_match_table = hisi_femac_match,
},
.probe = hisi_femac_drv_probe,
- .remove = hisi_femac_drv_remove,
+ .remove_new = hisi_femac_drv_remove,
#ifdef CONFIG_PM
.suspend = hisi_femac_drv_suspend,
.resume = hisi_femac_drv_resume,
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 26d22bb04b87..1a972b093a42 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -7,7 +7,8 @@
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/reset.h>
@@ -1094,7 +1095,6 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- const struct of_device_id *of_id = NULL;
struct net_device *ndev;
struct hix5hd2_priv *priv;
struct mii_bus *bus;
@@ -1110,12 +1110,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
- of_id = of_match_device(hix5hd2_of_match, dev);
- if (!of_id) {
- ret = -EINVAL;
- goto out_free_netdev;
- }
- priv->hw_cap = (unsigned long)of_id->data;
+ priv->hw_cap = (unsigned long)device_get_match_data(dev);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
@@ -1282,7 +1277,7 @@ out_free_netdev:
return ret;
}
-static int hix5hd2_dev_remove(struct platform_device *pdev)
+static void hix5hd2_dev_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hix5hd2_priv *priv = netdev_priv(ndev);
@@ -1298,8 +1293,6 @@ static int hix5hd2_dev_remove(struct platform_device *pdev)
of_node_put(priv->phy_node);
cancel_work_sync(&priv->tx_timeout_task);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id hix5hd2_of_match[] = {
@@ -1319,7 +1312,7 @@ static struct platform_driver hix5hd2_dev_driver = {
.of_match_table = hix5hd2_of_match,
},
.probe = hix5hd2_dev_probe,
- .remove = hix5hd2_dev_remove,
+ .remove_new = hix5hd2_dev_remove,
};
module_platform_driver(hix5hd2_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index fcaf5132b865..1b67da1f6fa8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -3007,7 +3007,7 @@ free_dev:
* hns_dsaf_remove - remove dsaf dev
* @pdev: dasf platform device
*/
-static int hns_dsaf_remove(struct platform_device *pdev)
+static void hns_dsaf_remove(struct platform_device *pdev)
{
struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
@@ -3020,8 +3020,6 @@ static int hns_dsaf_remove(struct platform_device *pdev)
hns_dsaf_free(dsaf_dev);
hns_dsaf_free_dev(dsaf_dev);
-
- return 0;
}
static const struct of_device_id g_dsaf_match[] = {
@@ -3033,7 +3031,7 @@ MODULE_DEVICE_TABLE(of, g_dsaf_match);
static struct platform_driver g_dsaf_driver = {
.probe = hns_dsaf_probe,
- .remove = hns_dsaf_remove,
+ .remove_new = hns_dsaf_remove,
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index 0f0e16f9afc0..7e00231c1acf 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -92,7 +92,7 @@ struct ppe_common_cb {
u8 comm_index; /*ppe_common index*/
u32 ppe_num;
- struct hns_ppe_cb ppe_cb[];
+ struct hns_ppe_cb ppe_cb[] __counted_by(ppe_num);
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index a9f805925699..c1e9b6997853 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -108,7 +108,7 @@ struct rcb_common_cb {
u32 ring_num;
u32 desc_num; /* desc num per queue*/
- struct ring_pair_cb ring_pair_cb[];
+ struct ring_pair_cb ring_pair_cb[] __counted_by(ring_num);
};
int hns_rcb_buf_size2type(u32 buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 7cf10d1e2b31..0900abf5c508 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2384,7 +2384,7 @@ out_read_prop_fail:
return ret;
}
-static int hns_nic_dev_remove(struct platform_device *pdev)
+static void hns_nic_dev_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -2413,7 +2413,6 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
of_node_put(to_of_node(priv->fwnode));
free_netdev(ndev);
- return 0;
}
static const struct of_device_id hns_enet_of_match[] = {
@@ -2431,7 +2430,7 @@ static struct platform_driver hns_nic_dev_driver = {
.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
- .remove = hns_nic_dev_remove,
+ .remove_new = hns_nic_dev_remove,
};
module_platform_driver(hns_nic_dev_driver);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index aaf1f42624a7..d7e175a9cb49 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -103,6 +103,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_LANE_NUM_B,
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_TM_FLUSH_B,
+ HNAE3_DEV_SUPPORT_VF_FAULT_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
@@ -177,6 +178,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_tm_flush_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TM_FLUSH_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_vf_fault_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -271,6 +275,7 @@ enum hnae3_reset_type {
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
HNAE3_NONE_RESET,
+ HNAE3_VF_EXP_RESET,
HNAE3_MAX_RESET,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index dcecb23daac6..d92ad6082d8e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -157,6 +157,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
+ {HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 2b7197ce0ae8..533c19d25e4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -93,6 +93,7 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
+ HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
@@ -348,6 +349,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_GRO_B = 20,
HCLGE_COMM_CAP_FD_B = 21,
HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_VF_FAULT_B = 26,
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_TM_FLUSH_B = 31,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index b8508533878b..0b138635bafa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -414,6 +414,9 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support tm flush",
.cap_bit = HNAE3_DEV_SUPPORT_TM_FLUSH_B,
+ }, {
+ .name = "support vf fault detect",
+ .cap_bit = HNAE3_DEV_SUPPORT_VF_FAULT_B,
}
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index cf50368441b7..06117502001f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -4940,8 +4940,7 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv)
static void hns3_alloc_page_pool(struct hns3_enet_ring *ring)
{
struct page_pool_params pp_params = {
- .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG |
- PP_FLAG_DMA_SYNC_DEV,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
.order = hns3_page_order(ring),
.pool_size = ring->desc_num * hns3_buf_size(ring) /
(PAGE_SIZE << hns3_page_order(ring)),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 3f35227ef1fa..d63e114f93d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1301,10 +1301,12 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
.msg = "tqp_int_ecc_error"
}, {
.type_id = PF_ABNORMAL_INT_ERROR,
- .msg = "pf_abnormal_int_error"
+ .msg = "pf_abnormal_int_error",
+ .cause_by_vf = true
}, {
.type_id = MPF_ABNORMAL_INT_ERROR,
- .msg = "mpf_abnormal_int_error"
+ .msg = "mpf_abnormal_int_error",
+ .cause_by_vf = true
}, {
.type_id = COMMON_ERROR,
.msg = "common_error"
@@ -2759,7 +2761,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev)
hclge_handle_error_info_log(ae_dev);
}
-static void
+static bool
hclge_handle_error_type_reg_log(struct device *dev,
struct hclge_mod_err_info *mod_info,
struct hclge_type_reg_err_info *type_reg_info)
@@ -2770,6 +2772,7 @@ hclge_handle_error_type_reg_log(struct device *dev,
u8 mod_id, total_module, type_id, total_type, i, is_ras;
u8 index_module = MODULE_NONE;
u8 index_type = NONE_ERROR;
+ bool cause_by_vf = false;
mod_id = mod_info->mod_id;
type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK;
@@ -2788,6 +2791,7 @@ hclge_handle_error_type_reg_log(struct device *dev,
for (i = 0; i < total_type; i++) {
if (type_id == hclge_hw_type_id_st[i].type_id) {
index_type = i;
+ cause_by_vf = hclge_hw_type_id_st[i].cause_by_vf;
break;
}
}
@@ -2805,6 +2809,8 @@ hclge_handle_error_type_reg_log(struct device *dev,
dev_err(dev, "reg_value:\n");
for (i = 0; i < type_reg_info->reg_num; i++)
dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+
+ return cause_by_vf;
}
static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
@@ -2815,6 +2821,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
struct device *dev = &hdev->pdev->dev;
struct hclge_mod_err_info *mod_info;
struct hclge_sum_err_info *sum_info;
+ bool cause_by_vf = false;
u8 mod_num, err_num, i;
u32 offset = 0;
@@ -2843,12 +2850,16 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
type_reg_info = (struct hclge_type_reg_err_info *)
&buf[offset++];
- hclge_handle_error_type_reg_log(dev, mod_info,
- type_reg_info);
+ if (hclge_handle_error_type_reg_log(dev, mod_info,
+ type_reg_info))
+ cause_by_vf = true;
offset += type_reg_info->reg_num;
}
}
+
+ if (hnae3_ae_dev_vf_fault_supported(hdev->ae_dev) && cause_by_vf)
+ set_bit(HNAE3_VF_EXP_RESET, &ae_dev->hw_err_reset_req);
}
static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num)
@@ -2940,3 +2951,98 @@ err_desc:
out:
return ret;
}
+
+static bool hclge_reset_vf_in_bitmap(struct hclge_dev *hdev,
+ unsigned long *bitmap)
+{
+ struct hclge_vport *vport;
+ bool exist_set = false;
+ int func_id;
+ int ret;
+
+ func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM);
+ if (func_id == PF_VPORT_ID)
+ return false;
+
+ while (func_id != HCLGE_VPORT_NUM) {
+ vport = hclge_get_vf_vport(hdev,
+ func_id - HCLGE_VF_VPORT_START_NUM);
+ if (!vport) {
+ dev_err(&hdev->pdev->dev, "invalid func id(%d)\n",
+ func_id);
+ return false;
+ }
+
+ dev_info(&hdev->pdev->dev, "do function %d recovery.", func_id);
+
+ ret = hclge_reset_tqp(&vport->nic);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to reset tqp, ret = %d.", ret);
+ return false;
+ }
+
+ ret = hclge_inform_vf_reset(vport, HNAE3_VF_FUNC_RESET);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to reset func %d, ret = %d.",
+ func_id, ret);
+ return false;
+ }
+
+ exist_set = true;
+ clear_bit(func_id, bitmap);
+ func_id = find_first_bit(bitmap, HCLGE_VPORT_NUM);
+ }
+
+ return exist_set;
+}
+
+static void hclge_get_vf_fault_bitmap(struct hclge_desc *desc,
+ unsigned long *bitmap)
+{
+#define HCLGE_FIR_FAULT_BYTES 24
+#define HCLGE_SEC_FAULT_BYTES 8
+
+ u8 *buff;
+
+ BUILD_BUG_ON(HCLGE_FIR_FAULT_BYTES + HCLGE_SEC_FAULT_BYTES !=
+ BITS_TO_BYTES(HCLGE_VPORT_NUM));
+
+ memcpy(bitmap, desc[0].data, HCLGE_FIR_FAULT_BYTES);
+ buff = (u8 *)bitmap + HCLGE_FIR_FAULT_BYTES;
+ memcpy(buff, desc[1].data, HCLGE_SEC_FAULT_BYTES);
+}
+
+int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev)
+{
+ unsigned long vf_fault_bitmap[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ struct hclge_desc desc[2];
+ bool cause_by_vf = false;
+ int ret;
+
+ if (!test_and_clear_bit(HNAE3_VF_EXP_RESET,
+ &hdev->ae_dev->hw_err_reset_req) ||
+ !hnae3_ae_dev_vf_fault_supported(hdev->ae_dev))
+ return 0;
+
+ hclge_comm_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_GET_QUEUE_ERR_VF,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_comm_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_GET_QUEUE_ERR_VF,
+ true);
+
+ ret = hclge_comm_cmd_send(&hdev->hw.hw, desc, 2);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vf bitmap, ret = %d.\n", ret);
+ return ret;
+ }
+ hclge_get_vf_fault_bitmap(desc, vf_fault_bitmap);
+
+ cause_by_vf = hclge_reset_vf_in_bitmap(hdev, vf_fault_bitmap);
+ if (cause_by_vf)
+ hdev->ae_dev->hw_err_reset_req = 0;
+
+ return 0;
+}
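[Editor's note] The new hclge_handle_vf_queue_err_ras() asks the firmware for a per-function fault bitmap (two descriptors, 24 + 8 bytes, matching the HCLGE_VPORT_NUM bit space) and resets only the VFs whose bits are set instead of escalating to a full PF reset. A minimal sketch of consuming such a bitmap; the driver itself walks it with find_first_bit()/clear_bit() and skips the PF's own bit, the names below are hypothetical:

unsigned long bitmap[BITS_TO_LONGS(FOO_VPORT_NUM)] = {};
int func_id;

memcpy(bitmap, fw_reply, BITS_TO_BYTES(FOO_VPORT_NUM));

for_each_set_bit(func_id, bitmap, FOO_VPORT_NUM)
	foo_reset_vf(func_id);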
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 86be6fb32990..68b738affa66 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -196,6 +196,7 @@ struct hclge_hw_module_id {
struct hclge_hw_type_id {
enum hclge_err_type_list type_id;
const char *msg;
+	bool cause_by_vf; /* indicate the error may come from vf exception */
};
struct hclge_sum_err_info {
@@ -228,4 +229,5 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev);
int hclge_handle_mac_tnl(struct hclge_dev *hdev);
+int hclge_handle_vf_queue_err_ras(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c42574e29747..66e5807903a0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -881,8 +881,8 @@ static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
- {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
- {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
+ {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
+ {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};
@@ -939,100 +939,98 @@ static void hclge_update_fec_support(struct hclge_mac *mac)
mac->supported);
}
+static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[8] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[6] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[8] = {
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
+};
+
+static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[9] = {
+ {HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
+ {HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
+ {HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
+ {HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
+ {HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
+ {HCLGE_SUPPORT_200G_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
+};
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_sr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_sr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_sr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_lr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(
- ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_lr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_lr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_lr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_cr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_cr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_cr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_cr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_kr(u16 speed_ability,
unsigned long *link_mode)
{
- if (speed_ability & HCLGE_SUPPORT_1G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_10G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_25G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_40G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- link_mode);
- if (speed_ability & HCLGE_SUPPORT_200G_BIT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- link_mode);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_kr_link_mode_bmap); i++) {
+ if (speed_ability & hclge_kr_link_mode_bmap[i].support_bit)
+ linkmode_set_bit(hclge_kr_link_mode_bmap[i].link_mode,
+ link_mode);
+ }
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1158,10 +1156,10 @@ static u32 hclge_get_max_speed(u16 speed_ability)
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
return HCLGE_MAC_SPEED_200G;
- if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_100G_BITS)
return HCLGE_MAC_SPEED_100G;
- if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ if (speed_ability & HCLGE_SUPPORT_50G_BITS)
return HCLGE_MAC_SPEED_50G;
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
@@ -3424,7 +3422,7 @@ static int hclge_get_status(struct hnae3_handle *handle)
return hdev->hw.mac.link;
}
-static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
{
if (!pci_num_vf(hdev->pdev)) {
dev_err(&hdev->pdev->dev,
@@ -4468,6 +4466,7 @@ static void hclge_handle_err_recovery(struct hclge_dev *hdev)
if (hclge_find_error_source(hdev)) {
hclge_handle_error_info_log(ae_dev);
hclge_handle_mac_tnl(hdev);
+ hclge_handle_vf_queue_err_ras(hdev);
}
hclge_handle_err_reset_request(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7bc2049b723d..51979cf71262 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -185,15 +185,22 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_1G_BIT BIT(0)
#define HCLGE_SUPPORT_10G_BIT BIT(1)
#define HCLGE_SUPPORT_25G_BIT BIT(2)
-#define HCLGE_SUPPORT_50G_BIT BIT(3)
-#define HCLGE_SUPPORT_100G_BIT BIT(4)
+#define HCLGE_SUPPORT_50G_R2_BIT BIT(3)
+#define HCLGE_SUPPORT_100G_R4_BIT BIT(4)
/* to be compatible with exsit board */
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_50G_R1_BIT BIT(9)
+#define HCLGE_SUPPORT_100G_R2_BIT BIT(10)
+
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
+#define HCLGE_SUPPORT_50G_BITS \
+ (HCLGE_SUPPORT_50G_R2_BIT | HCLGE_SUPPORT_50G_R1_BIT)
+#define HCLGE_SUPPORT_100G_BITS \
+ (HCLGE_SUPPORT_100G_R4_BIT | HCLGE_SUPPORT_100G_R2_BIT)
enum HCLGE_DEV_STATE {
HCLGE_STATE_REINITING,
@@ -1076,6 +1083,11 @@ struct hclge_mac_speed_map {
u32 speed_fw; /* speed defined in firmware */
};
+struct hclge_link_mode_bmap {
+ u16 support_bit;
+ enum ethtool_link_mode_bit_indices link_mode;
+};
+
int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
bool en_mc_pmc, bool en_bc_pmc);
int hclge_add_uc_addr_common(struct hclge_vport *vport,
@@ -1146,4 +1158,6 @@ int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
int hclge_mac_update_stats(struct hclge_dev *hdev);
+struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf);
+int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type);
#endif
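[Editor's note] The hclge_main changes split the old 50G/100G ability bits into per-lane variants (R1/R2 for 50G, R4/R2 for 100G), add HCLGE_SUPPORT_50G_BITS / HCLGE_SUPPORT_100G_BITS as the unions used for speed checks, and replace the long if-chains in hclge_convert_setting_*() with loops over hclge_link_mode_bmap tables. A minimal sketch of the table-driven pattern, names hypothetical:

struct foo_link_mode_bmap {
	u16 support_bit;
	enum ethtool_link_mode_bit_indices link_mode;
};

static const struct foo_link_mode_bmap foo_sr_map[] = {
	{ FOO_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT },
	{ FOO_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT },
};

static void foo_convert_setting_sr(u16 speed_ability, unsigned long *link_mode)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_sr_map); i++)
		if (speed_ability & foo_sr_map[i].support_bit)
			linkmode_set_bit(foo_sr_map[i].link_mode, link_mode);
}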
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 04ff9bf12185..4b0d07ca2505 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -124,7 +124,7 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
+int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
{
__le16 msg_data;
u8 dest_vfid;
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 409a89d80220..ed73707176c1 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -610,7 +610,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
*
* Return 0 on success, negative on failure
*/
-static int hns_mdio_remove(struct platform_device *pdev)
+static void hns_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus;
@@ -618,7 +618,6 @@ static int hns_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
platform_set_drvdata(pdev, NULL);
- return 0;
}
static const struct of_device_id hns_mdio_match[] = {
@@ -636,7 +635,7 @@ MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
- .remove = hns_mdio_remove,
+ .remove_new = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 1749d26f4bef..03e42512a2d5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -315,136 +315,76 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
devlink_unregister(devlink);
}
-static int chip_fault_show(struct devlink_fmsg *fmsg,
- struct hinic_fault_event *event)
+static void chip_fault_show(struct devlink_fmsg *fmsg,
+ struct hinic_fault_event *event)
{
const char * const level_str[FAULT_LEVEL_MAX + 1] = {
"fatal", "reset", "flr", "general", "suggestion", "Unknown"};
u8 fault_level;
- int err;
fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ?
event->event.chip.err_level : FAULT_LEVEL_MAX;
- if (fault_level == FAULT_LEVEL_SERIOUS_FLR) {
- err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
- (u32)event->event.chip.func_id);
- if (err)
- return err;
- }
-
- err = devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr",
- event->event.chip.err_csr_addr);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_value",
- event->event.chip.err_csr_value);
- if (err)
- return err;
-
- return 0;
+ if (fault_level == FAULT_LEVEL_SERIOUS_FLR)
+ devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id",
+ (u32)event->event.chip.func_id);
+ devlink_fmsg_u8_pair_put(fmsg, "module_id", event->event.chip.node_id);
+ devlink_fmsg_u32_pair_put(fmsg, "err_type", (u32)event->event.chip.err_type);
+ devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]);
+ devlink_fmsg_u32_pair_put(fmsg, "err_csr_addr",
+ event->event.chip.err_csr_addr);
+ devlink_fmsg_u32_pair_put(fmsg, "err_csr_value",
+ event->event.chip.err_csr_value);
}
-static int fault_report_show(struct devlink_fmsg *fmsg,
- struct hinic_fault_event *event)
+static void fault_report_show(struct devlink_fmsg *fmsg,
+ struct hinic_fault_event *event)
{
const char * const type_str[FAULT_TYPE_MAX + 1] = {
"chip", "ucode", "mem rd timeout", "mem wr timeout",
"reg rd timeout", "reg wr timeout", "phy fault", "Unknown"};
u8 fault_type;
- int err;
fault_type = (event->type < FAULT_TYPE_MAX) ? event->type : FAULT_TYPE_MAX;
- err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
- if (err)
- return err;
-
- err = devlink_fmsg_binary_pair_put(fmsg, "Fault raw data",
- event->event.val, sizeof(event->event.val));
- if (err)
- return err;
+ devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]);
+ devlink_fmsg_binary_pair_put(fmsg, "Fault raw data", event->event.val,
+ sizeof(event->event.val));
switch (event->type) {
case FAULT_TYPE_CHIP:
- err = chip_fault_show(fmsg, event);
- if (err)
- return err;
+ chip_fault_show(fmsg, event);
break;
case FAULT_TYPE_UCODE:
- err = devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc);
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "Cause_id", event->event.ucode.cause_id);
+ devlink_fmsg_u8_pair_put(fmsg, "core_id", event->event.ucode.core_id);
+ devlink_fmsg_u8_pair_put(fmsg, "c_id", event->event.ucode.c_id);
+ devlink_fmsg_u8_pair_put(fmsg, "epc", event->event.ucode.epc);
break;
case FAULT_TYPE_MEM_RD_TIMEOUT:
case FAULT_TYPE_MEM_WR_TIMEOUT:
- err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl",
- event->event.mem_timeout.err_csr_ctrl);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "err_csr_data",
- event->event.mem_timeout.err_csr_data);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab",
- event->event.mem_timeout.ctrl_tab);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "mem_index",
- event->event.mem_timeout.mem_index);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "Err_csr_ctrl",
+ event->event.mem_timeout.err_csr_ctrl);
+ devlink_fmsg_u32_pair_put(fmsg, "err_csr_data",
+ event->event.mem_timeout.err_csr_data);
+ devlink_fmsg_u32_pair_put(fmsg, "ctrl_tab",
+ event->event.mem_timeout.ctrl_tab);
+ devlink_fmsg_u32_pair_put(fmsg, "mem_index",
+ event->event.mem_timeout.mem_index);
break;
case FAULT_TYPE_REG_RD_TIMEOUT:
case FAULT_TYPE_REG_WR_TIMEOUT:
- err = devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "Err_csr", event->event.reg_timeout.err_csr);
break;
case FAULT_TYPE_PHY_FAULT:
- err = devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id);
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data);
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "Op_type", event->event.phy_fault.op_type);
+ devlink_fmsg_u8_pair_put(fmsg, "port_id", event->event.phy_fault.port_id);
+ devlink_fmsg_u8_pair_put(fmsg, "dev_ad", event->event.phy_fault.dev_ad);
+ devlink_fmsg_u32_pair_put(fmsg, "csr_addr", event->event.phy_fault.csr_addr);
+ devlink_fmsg_u32_pair_put(fmsg, "op_data", event->event.phy_fault.op_data);
break;
default:
break;
}
-
- return 0;
}
static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter,
@@ -452,75 +392,30 @@ static int hinic_hw_reporter_dump(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
if (priv_ctx)
- return fault_report_show(fmsg, priv_ctx);
+ fault_report_show(fmsg, priv_ctx);
return 0;
}
-static int mgmt_watchdog_report_show(struct devlink_fmsg *fmsg,
- struct hinic_mgmt_watchdog_info *watchdog_info)
+static void mgmt_watchdog_report_show(struct devlink_fmsg *fmsg,
+ struct hinic_mgmt_watchdog_info *winfo)
{
- int err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", watchdog_info->curr_time_h);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "time_l", watchdog_info->curr_time_l);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "task_id", watchdog_info->task_id);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "sp", watchdog_info->sp);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", watchdog_info->curr_used);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "peak_used", watchdog_info->peak_used);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", watchdog_info->is_overflow);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "stack_top", watchdog_info->stack_top);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", watchdog_info->stack_bottom);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", watchdog_info->pc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "lr", watchdog_info->lr);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cpsr", watchdog_info->cpsr);
- if (err)
- return err;
-
- err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info",
- watchdog_info->reg, sizeof(watchdog_info->reg));
- if (err)
- return err;
-
- err = devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)",
- watchdog_info->data, sizeof(watchdog_info->data));
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_u32_pair_put(fmsg, "Mgmt deadloop time_h", winfo->curr_time_h);
+ devlink_fmsg_u32_pair_put(fmsg, "time_l", winfo->curr_time_l);
+ devlink_fmsg_u32_pair_put(fmsg, "task_id", winfo->task_id);
+ devlink_fmsg_u32_pair_put(fmsg, "sp", winfo->sp);
+ devlink_fmsg_u32_pair_put(fmsg, "stack_current_used", winfo->curr_used);
+ devlink_fmsg_u32_pair_put(fmsg, "peak_used", winfo->peak_used);
+ devlink_fmsg_u32_pair_put(fmsg, "\n Overflow_flag", winfo->is_overflow);
+ devlink_fmsg_u32_pair_put(fmsg, "stack_top", winfo->stack_top);
+ devlink_fmsg_u32_pair_put(fmsg, "stack_bottom", winfo->stack_bottom);
+ devlink_fmsg_u32_pair_put(fmsg, "mgmt_pc", winfo->pc);
+ devlink_fmsg_u32_pair_put(fmsg, "lr", winfo->lr);
+ devlink_fmsg_u32_pair_put(fmsg, "cpsr", winfo->cpsr);
+ devlink_fmsg_binary_pair_put(fmsg, "Mgmt register info", winfo->reg,
+ sizeof(winfo->reg));
+ devlink_fmsg_binary_pair_put(fmsg, "Mgmt dump stack(start from sp)",
+ winfo->data, sizeof(winfo->data));
}
static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter,
@@ -528,7 +423,7 @@ static int hinic_fw_reporter_dump(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
if (priv_ctx)
- return mgmt_watchdog_report_show(fmsg, priv_ctx);
+ mgmt_watchdog_report_show(fmsg, priv_ctx);
return 0;
}
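
With the devlink_fmsg_*_pair_put() helpers now returning void (an error is latched inside the fmsg and surfaced by devlink core), a health reporter dump callback reduces to straight-line puts. A minimal sketch, with hypothetical foo_* names and a hypothetical fault-context struct:

#include <net/devlink.h>

struct foo_fault_ctx {
	const char *reason;
	u32 err_code;
};

static int foo_reporter_dump(struct devlink_health_reporter *reporter,
			     struct devlink_fmsg *fmsg, void *priv_ctx,
			     struct netlink_ext_ack *extack)
{
	struct foo_fault_ctx *ctx = priv_ctx;

	if (!ctx)
		return 0;

	/* no per-call error checks needed any more */
	devlink_fmsg_string_pair_put(fmsg, "reason", ctx->reason);
	devlink_fmsg_u32_pair_put(fmsg, "err_code", ctx->err_code);
	return 0;
}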
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index ad47ac51a139..9b60966736db 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -861,7 +861,7 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
struct hinic_dev *nic_dev = netdev_priv(netdev);
struct hinic_hwdev *hwdev = nic_dev->hwdev;
- int err, irqname_len;
+ int err;
txq->netdev = netdev;
txq->sq = sq;
@@ -882,15 +882,13 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
goto err_alloc_free_sges;
}
- irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
- txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
+ txq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL, "%s_txq%d",
+ netdev->name, qp->q_id);
if (!txq->irq_name) {
err = -ENOMEM;
goto err_alloc_irqname;
}
- sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
-
err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
CI_UPDATE_NO_COALESC);
if (err)
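
devm_kasprintf() measures, allocates (device-managed) and formats the string in one call, replacing the snprintf(NULL, 0, ...) + devm_kzalloc() + sprintf() sequence removed above. A minimal sketch with a hypothetical helper name:

#include <linux/device.h>

static char *foo_make_irq_name(struct device *dev, const char *base, int q_id)
{
	/* devm_kasprintf() returns a device-managed, formatted string,
	 * or NULL on allocation failure; it is freed automatically when
	 * the device is unbound */
	return devm_kasprintf(dev, GFP_KERNEL, "%s_txq%d", base, q_id);
}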
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 54bb4d9a0d1e..813403c2628f 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -153,7 +153,7 @@ probe_failed_free_mpu:
return retval;
}
-static int sni_82596_driver_remove(struct platform_device *pdev)
+static void sni_82596_driver_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct i596_private *lp = netdev_priv(dev);
@@ -164,12 +164,11 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
iounmap(lp->ca);
iounmap(lp->mpu_port);
free_netdev (dev);
- return 0;
}
static struct platform_driver sni_82596_driver = {
.probe = sni_82596_probe,
- .remove = sni_82596_driver_remove,
+ .remove_new = sni_82596_driver_remove,
.driver = {
.name = sni_82596_string,
},
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 0a56e9752464..1e29e5c9a2df 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -90,7 +90,7 @@ static struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int ehea_probe_adapter(struct platform_device *dev);
-static int ehea_remove(struct platform_device *dev);
+static void ehea_remove(struct platform_device *dev);
static const struct of_device_id ehea_module_device_table[] = {
{
@@ -121,7 +121,7 @@ static struct platform_driver ehea_driver = {
.of_match_table = ehea_device_table,
},
.probe = ehea_probe_adapter,
- .remove = ehea_remove,
+ .remove_new = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
@@ -900,7 +900,7 @@ static int ehea_poll(struct napi_struct *napi, int budget)
if (!cqe && !cqe_skb)
return rx;
- if (!napi_reschedule(napi))
+ if (!napi_schedule(napi))
return rx;
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
@@ -3471,7 +3471,7 @@ out:
return ret;
}
-static int ehea_remove(struct platform_device *dev)
+static void ehea_remove(struct platform_device *dev)
{
struct ehea_adapter *adapter = platform_get_drvdata(dev);
int i;
@@ -3492,8 +3492,6 @@ static int ehea_remove(struct platform_device *dev)
list_del(&adapter->list);
ehea_update_firmware_handles();
-
- return 0;
}
static int check_module_parm(void)
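
The napi_reschedule() -> napi_schedule() swaps in this series rely on napi_schedule() now returning a bool that says whether the NAPI instance was actually (re)scheduled. A minimal poll-routine sketch; foo_process_rx(), foo_rx_pending() and the irq helpers are hypothetical:

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_process_rx(napi, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		foo_enable_irq(napi);
		/* did more work arrive between the last check and irq enable? */
		if (foo_rx_pending(napi) && napi_schedule(napi))
			foo_disable_irq(napi);
	}
	return work_done;
}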
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 0c314bf97480..e6e47b1842ea 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -3253,7 +3253,7 @@ static int emac_probe(struct platform_device *ofdev)
return err;
}
-static int emac_remove(struct platform_device *ofdev)
+static void emac_remove(struct platform_device *ofdev)
{
struct emac_instance *dev = platform_get_drvdata(ofdev);
@@ -3290,8 +3290,6 @@ static int emac_remove(struct platform_device *ofdev)
irq_dispose_mapping(dev->emac_irq);
free_netdev(dev->ndev);
-
- return 0;
}
/* XXX Features in here should be replaced by properties... */
@@ -3319,7 +3317,7 @@ static struct platform_driver emac_driver = {
.of_match_table = emac_match,
},
.probe = emac_probe,
- .remove = emac_remove,
+ .remove_new = emac_remove,
};
static void __init emac_make_bootlist(void)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index c3236b59e7e9..2439f7e96e05 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -442,7 +442,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
if (unlikely(mc->ops->peek_rx(mc->dev) ||
test_bit(MAL_COMMAC_RX_STOPPED, &mc->flags))) {
MAL_DBG2(mal, "rotting packet" NL);
- if (!napi_reschedule(napi))
+ if (!napi_schedule(napi))
goto more_work;
spin_lock_irqsave(&mal->lock, flags);
@@ -711,7 +711,7 @@ static int mal_probe(struct platform_device *ofdev)
return err;
}
-static int mal_remove(struct platform_device *ofdev)
+static void mal_remove(struct platform_device *ofdev)
{
struct mal_instance *mal = platform_get_drvdata(ofdev);
@@ -740,8 +740,6 @@ static int mal_remove(struct platform_device *ofdev)
NUM_RX_BUFF * mal->num_rx_chans), mal->bd_virt,
mal->bd_dma);
kfree(mal);
-
- return 0;
}
static const struct of_device_id mal_platform_match[] =
@@ -770,7 +768,7 @@ static struct platform_driver mal_of_driver = {
.of_match_table = mal_platform_match,
},
.probe = mal_probe,
- .remove = mal_remove,
+ .remove_new = mal_remove,
};
int __init mal_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c
index fd437f986edf..e1712fdc3c31 100644
--- a/drivers/net/ethernet/ibm/emac/rgmii.c
+++ b/drivers/net/ethernet/ibm/emac/rgmii.c
@@ -273,7 +273,7 @@ static int rgmii_probe(struct platform_device *ofdev)
return rc;
}
-static int rgmii_remove(struct platform_device *ofdev)
+static void rgmii_remove(struct platform_device *ofdev)
{
struct rgmii_instance *dev = platform_get_drvdata(ofdev);
@@ -281,8 +281,6 @@ static int rgmii_remove(struct platform_device *ofdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static const struct of_device_id rgmii_match[] =
@@ -302,7 +300,7 @@ static struct platform_driver rgmii_driver = {
.of_match_table = rgmii_match,
},
.probe = rgmii_probe,
- .remove = rgmii_remove,
+ .remove_new = rgmii_remove,
};
int __init rgmii_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/tah.c b/drivers/net/ethernet/ibm/emac/tah.c
index aae9a88d95d7..fa3488258ca2 100644
--- a/drivers/net/ethernet/ibm/emac/tah.c
+++ b/drivers/net/ethernet/ibm/emac/tah.c
@@ -130,7 +130,7 @@ static int tah_probe(struct platform_device *ofdev)
return rc;
}
-static int tah_remove(struct platform_device *ofdev)
+static void tah_remove(struct platform_device *ofdev)
{
struct tah_instance *dev = platform_get_drvdata(ofdev);
@@ -138,8 +138,6 @@ static int tah_remove(struct platform_device *ofdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static const struct of_device_id tah_match[] =
@@ -160,7 +158,7 @@ static struct platform_driver tah_driver = {
.of_match_table = tah_match,
},
.probe = tah_probe,
- .remove = tah_remove,
+ .remove_new = tah_remove,
};
int __init tah_init(void)
diff --git a/drivers/net/ethernet/ibm/emac/zmii.c b/drivers/net/ethernet/ibm/emac/zmii.c
index 6337388ee5f4..26e86cdee2f6 100644
--- a/drivers/net/ethernet/ibm/emac/zmii.c
+++ b/drivers/net/ethernet/ibm/emac/zmii.c
@@ -278,7 +278,7 @@ static int zmii_probe(struct platform_device *ofdev)
return rc;
}
-static int zmii_remove(struct platform_device *ofdev)
+static void zmii_remove(struct platform_device *ofdev)
{
struct zmii_instance *dev = platform_get_drvdata(ofdev);
@@ -286,8 +286,6 @@ static int zmii_remove(struct platform_device *ofdev)
iounmap(dev->base);
kfree(dev);
-
- return 0;
}
static const struct of_device_id zmii_match[] =
@@ -308,7 +306,7 @@ static struct platform_driver zmii_driver = {
.of_match_table = zmii_match,
},
.probe = zmii_probe,
- .remove = zmii_remove,
+ .remove_new = zmii_remove,
};
int __init zmii_init(void)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a8d79ee350f8..b5aef0b29efe 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1432,7 +1432,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
BUG_ON(lpar_rc != H_SUCCESS);
if (ibmveth_rxq_pending_buffer(adapter) &&
- napi_reschedule(napi)) {
+ napi_schedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index cdf5251e5679..30c47b8470ad 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3519,7 +3519,7 @@ restart_poll:
if (napi_complete_done(napi, frames_processed)) {
enable_scrq_irq(adapter, rx_scrq);
if (pending_scrq(adapter, rx_scrq)) {
- if (napi_reschedule(napi)) {
+ if (napi_schedule(napi)) {
disable_scrq_irq(adapter, rx_scrq);
goto restart_poll;
}
@@ -5247,7 +5247,8 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq,
/* copy firmware version string from vpd into adapter */
if ((substr + 3 + fw_level_len) <
(adapter->vpd->buff + adapter->vpd->len)) {
- strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
+ strscpy(adapter->fw_version, substr + 3,
+ sizeof(adapter->fw_version));
} else {
dev_info(dev, "FW substr extrapolated VPD buff\n");
}
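
The strncpy() -> strscpy() conversions here (and in the e100/e1000/fm10k hunks below) trade strncpy()'s possibly-unterminated result for a copy that always NUL-terminates and reports truncation. A minimal sketch with a hypothetical helper name:

#include <linux/printk.h>
#include <linux/string.h>

static void foo_copy_fw_version(char *dst, size_t dst_size, const char *src)
{
	/* strscpy() always NUL-terminates dst and returns -E2BIG when
	 * src did not fit; strncpy() may leave dst unterminated */
	if (strscpy(dst, src, dst_size) < 0)
		pr_debug("firmware version string truncated\n");
}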
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 9bc0a9519899..06ddd7147c7f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -225,6 +225,7 @@ config I40E
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI
select AUXILIARY_BUS
+ select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
@@ -284,6 +285,7 @@ config ICE
select DIMLIB
select NET_DEVLINK
select PLDMFW
+ select DPLL
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
@@ -355,5 +357,17 @@ config IGC
To compile this driver as a module, choose M here. The module
will be called igc.
+config IDPF
+ tristate "Intel(R) Infrastructure Data Path Function Support"
+ depends on PCI_MSI
+ select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
+ help
+ This driver supports Intel(R) Infrastructure Data Path Function
+ devices.
+
+ To compile this driver as a module, choose M here. The module
+ will be called idpf.
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index d80d04132073..dacb481ee5b1 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IAVF) += iavf/
obj-$(CONFIG_FM10K) += fm10k/
obj-$(CONFIG_ICE) += ice/
+obj-$(CONFIG_IDPF) += idpf/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index d3fdc290937f..01f0f12035ca 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2841,7 +2841,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e100_netdev_ops;
netdev->ethtool_ops = &e100_ethtool_ops;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
nic = netdev_priv(netdev);
netif_napi_add_weight(netdev, &nic->napi, e100_poll, E100_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index da6e303ad99b..1d1e93686af2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1014,7 +1014,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->watchdog_timeo = 5 * HZ;
netif_napi_add(netdev, &adapter->napi, e1000_clean);
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
adapter->bd_number = cards_found;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index d53369e30040..13a05604dcc0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -448,10 +448,10 @@ static void fm10k_get_drvinfo(struct net_device *dev,
{
struct fm10k_intfc *interface = netdev_priv(dev);
- strncpy(info->driver, fm10k_driver_name,
- sizeof(info->driver) - 1);
- strncpy(info->bus_info, pci_name(interface->pdev),
- sizeof(info->bus_info) - 1);
+ strscpy(info->driver, fm10k_driver_name,
+ sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(interface->pdev),
+ sizeof(info->bus_info));
}
static void fm10k_get_pauseparam(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 2f21b3e89fd0..cad93f323bd5 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -24,6 +24,7 @@ i40e-objs := i40e_main.o \
i40e_ddp.o \
i40e_client.o \
i40e_virtchnl_pf.o \
- i40e_xsk.o
+ i40e_xsk.o \
+ i40e_devlink.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 55bb0b5310d5..1bf424ac3145 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -4,47 +4,21 @@
#ifndef _I40E_H_
#define _I40E_H_
-#include <net/tcp.h>
-#include <net/udp.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/ioport.h>
-#include <linux/iommu.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/hashtable.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/sctp.h>
-#include <linux/pkt_sched.h>
-#include <linux/ipv6.h>
-#include <net/checksum.h>
-#include <net/ip6_checksum.h>
-#include <linux/ethtool.h>
-#include <linux/if_vlan.h>
-#include <linux/if_macvlan.h>
-#include <linux/if_bridge.h>
-#include <linux/clocksource.h>
-#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/types.h>
+#include <linux/avf/virtchnl.h>
+#include <linux/net/intel/i40e_client.h>
+#include <net/devlink.h>
#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/tc_act/tc_gact.h>
-#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
-#include <net/xdp_sock.h>
-#include <linux/bitfield.h>
-#include "i40e_type.h"
+#include "i40e_dcb.h"
+#include "i40e_debug.h"
+#include "i40e_devlink.h"
+#include "i40e_io.h"
#include "i40e_prototype.h"
-#include <linux/net/intel/i40e_client.h>
-#include <linux/avf/virtchnl.h>
-#include "i40e_virtchnl_pf.h"
+#include "i40e_register.h"
#include "i40e_txrx.h"
-#include "i40e_dcb.h"
/* Useful i40e defaults */
#define I40E_MAX_VEB 16
@@ -75,23 +49,19 @@
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-#define I40E_NVM_VERSION_LO_SHIFT 0
-#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
-#define I40E_NVM_VERSION_HI_SHIFT 12
-#define I40E_NVM_VERSION_HI_MASK (0xf << I40E_NVM_VERSION_HI_SHIFT)
-#define I40E_OEM_VER_BUILD_MASK 0xffff
-#define I40E_OEM_VER_PATCH_MASK 0xff
-#define I40E_OEM_VER_BUILD_SHIFT 8
-#define I40E_OEM_VER_SHIFT 24
#define I40E_PHY_DEBUG_ALL \
(I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
#define I40E_OEM_EETRACK_ID 0xffffffff
-#define I40E_OEM_GEN_SHIFT 24
-#define I40E_OEM_SNAP_MASK 0x00ff0000
-#define I40E_OEM_SNAP_SHIFT 16
-#define I40E_OEM_RELEASE_MASK 0x0000ffff
+#define I40E_NVM_VERSION_LO_MASK GENMASK(7, 0)
+#define I40E_NVM_VERSION_HI_MASK GENMASK(15, 12)
+#define I40E_OEM_VER_BUILD_MASK GENMASK(23, 8)
+#define I40E_OEM_VER_PATCH_MASK GENMASK(7, 0)
+#define I40E_OEM_VER_MASK GENMASK(31, 24)
+#define I40E_OEM_GEN_MASK GENMASK(31, 24)
+#define I40E_OEM_SNAP_MASK GENMASK(23, 16)
+#define I40E_OEM_RELEASE_MASK GENMASK(15, 0)
#define I40E_RX_DESC(R, i) \
(&(((union i40e_rx_desc *)((R)->desc))[i]))
@@ -323,29 +293,6 @@ struct i40e_udp_port_config {
u8 filter_index;
};
-#define I40_DDP_FLASH_REGION 100
-#define I40E_PROFILE_INFO_SIZE 48
-#define I40E_MAX_PROFILE_NUM 16
-#define I40E_PROFILE_LIST_SIZE \
- (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
-#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
-#define I40E_DDP_PROFILE_NAME_MAX 64
-
-int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
- bool is_add);
-int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
-
-struct i40e_ddp_profile_list {
- u32 p_count;
- struct i40e_profile_info p_info[];
-};
-
-struct i40e_ddp_old_profile_list {
- struct list_head list;
- size_t old_ddp_size;
- u8 old_ddp_buf[];
-};
-
/* macros related to FLX_PIT */
#define I40E_FLEX_SET_FSIZE(fsize) (((fsize) << \
I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \
@@ -462,6 +409,7 @@ static inline const u8 *i40e_channel_mac(struct i40e_channel *ch)
/* struct that defines the Ethernet device */
struct i40e_pf {
struct pci_dev *pdev;
+ struct devlink_port devlink_port;
struct i40e_hw hw;
DECLARE_BITMAP(state, __I40E_STATE_SIZE__);
struct msix_entry *msix_entries;
@@ -1002,43 +950,104 @@ struct i40e_device {
};
/**
- * i40e_nvm_version_str - format the NVM version strings
+ * i40e_info_nvm_ver - format the NVM version string
* @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ *
+ * Formats NVM version string as:
+ * <gen>.<snap>.<release> when eetrackid == I40E_OEM_EETRACK_ID
+ * <nvm_major>.<nvm_minor> otherwise
**/
-static inline char *i40e_nvm_version_str(struct i40e_hw *hw)
+static inline void i40e_info_nvm_ver(struct i40e_hw *hw, char *buf, size_t len)
{
- static char buf[32];
- u32 full_ver;
+ struct i40e_nvm_info *nvm = &hw->nvm;
- full_ver = hw->nvm.oem_ver;
-
- if (hw->nvm.eetrack == I40E_OEM_EETRACK_ID) {
+ if (nvm->eetrack == I40E_OEM_EETRACK_ID) {
+ u32 full_ver = nvm->oem_ver;
u8 gen, snap;
u16 release;
- gen = (u8)(full_ver >> I40E_OEM_GEN_SHIFT);
- snap = (u8)((full_ver & I40E_OEM_SNAP_MASK) >>
- I40E_OEM_SNAP_SHIFT);
- release = (u16)(full_ver & I40E_OEM_RELEASE_MASK);
-
- snprintf(buf, sizeof(buf), "%x.%x.%x", gen, snap, release);
+ gen = FIELD_GET(I40E_OEM_GEN_MASK, full_ver);
+ snap = FIELD_GET(I40E_OEM_SNAP_MASK, full_ver);
+ release = FIELD_GET(I40E_OEM_RELEASE_MASK, full_ver);
+ snprintf(buf, len, "%x.%x.%x", gen, snap, release);
} else {
- u8 ver, patch;
+ u8 major, minor;
+
+ major = FIELD_GET(I40E_NVM_VERSION_HI_MASK, nvm->version);
+ minor = FIELD_GET(I40E_NVM_VERSION_LO_MASK, nvm->version);
+ snprintf(buf, len, "%x.%02x", major, minor);
+ }
+}
+
+/**
+ * i40e_info_eetrack - format the EETrackID string
+ * @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ *
+ * Returns the hexadecimally formatted EETrackID if it is
+ * different from I40E_OEM_EETRACK_ID, otherwise an empty string.
+ **/
+static inline void i40e_info_eetrack(struct i40e_hw *hw, char *buf, size_t len)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+
+ buf[0] = '\0';
+ if (nvm->eetrack != I40E_OEM_EETRACK_ID)
+ snprintf(buf, len, "0x%08x", nvm->eetrack);
+}
+
+/**
+ * i40e_info_civd_ver - format the combo image version string
+ * @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ *
+ * Returns the formatted combo image version if the adapter's EETrackID is
+ * different from I40E_OEM_EETRACK_ID, otherwise an empty string.
+ **/
+static inline void i40e_info_civd_ver(struct i40e_hw *hw, char *buf, size_t len)
+{
+ struct i40e_nvm_info *nvm = &hw->nvm;
+
+ buf[0] = '\0';
+ if (nvm->eetrack != I40E_OEM_EETRACK_ID) {
+ u32 full_ver = nvm->oem_ver;
+ u8 major, minor;
u16 build;
- ver = (u8)(full_ver >> I40E_OEM_VER_SHIFT);
- build = (u16)((full_ver >> I40E_OEM_VER_BUILD_SHIFT) &
- I40E_OEM_VER_BUILD_MASK);
- patch = (u8)(full_ver & I40E_OEM_VER_PATCH_MASK);
-
- snprintf(buf, sizeof(buf),
- "%x.%02x 0x%x %d.%d.%d",
- (hw->nvm.version & I40E_NVM_VERSION_HI_MASK) >>
- I40E_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & I40E_NVM_VERSION_LO_MASK) >>
- I40E_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack, ver, build, patch);
+ major = FIELD_GET(I40E_OEM_VER_MASK, full_ver);
+ build = FIELD_GET(I40E_OEM_VER_BUILD_MASK, full_ver);
+ minor = FIELD_GET(I40E_OEM_VER_PATCH_MASK, full_ver);
+ snprintf(buf, len, "%d.%d.%d", major, build, minor);
}
+}
+
+/**
+ * i40e_nvm_version_str - format the NVM version strings
+ * @hw: ptr to the hardware info
+ * @buf: string buffer to store
+ * @len: buffer size
+ **/
+static inline char *i40e_nvm_version_str(struct i40e_hw *hw, char *buf,
+ size_t len)
+{
+ char ver[16] = " ";
+
+ /* Get NVM version */
+ i40e_info_nvm_ver(hw, buf, len);
+
+ /* Append EETrackID if provided */
+ i40e_info_eetrack(hw, &ver[1], sizeof(ver) - 1);
+ if (strlen(ver) > 1)
+ strlcat(buf, ver, len);
+
+ /* Append combo image version if provided */
+ i40e_info_civd_ver(hw, &ver[1], sizeof(ver) - 1);
+ if (strlen(ver) > 1)
+ strlcat(buf, ver, len);
return buf;
}
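
The shift/mask define pairs removed above are folded into single GENMASK() definitions, and FIELD_GET() derives the shift from the mask at compile time. A small sketch of the same decode with hypothetical FOO_* masks and helper name:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FOO_OEM_GEN_MASK	GENMASK(31, 24)
#define FOO_OEM_SNAP_MASK	GENMASK(23, 16)
#define FOO_OEM_RELEASE_MASK	GENMASK(15, 0)

static void foo_split_oem_ver(u32 full_ver, u8 *gen, u8 *snap, u16 *release)
{
	/* FIELD_GET() masks and shifts in one step */
	*gen = FIELD_GET(FOO_OEM_GEN_MASK, full_ver);
	*snap = FIELD_GET(FOO_OEM_SNAP_MASK, full_ver);
	*release = FIELD_GET(FOO_OEM_RELEASE_MASK, full_ver);
}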
@@ -1321,4 +1330,15 @@ static inline u32 i40e_is_tc_mqprio_enabled(struct i40e_pf *pf)
return pf->flags & I40E_FLAG_TC_MQPRIO;
}
+/**
+ * i40e_hw_to_pf - get pf pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ **/
+static inline struct i40e_pf *i40e_hw_to_pf(struct i40e_hw *hw)
+{
+ return container_of(hw, struct i40e_pf, hw);
+}
+
+struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 100eb77b8dfe..9ce6e633cc2f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40e_type.h"
+#include <linux/delay.h>
+#include "i40e_alloc.h"
#include "i40e_register.h"
-#include "i40e_adminq.h"
#include "i40e_prototype.h"
static void i40e_resume_aq(struct i40e_hw *hw);
@@ -51,7 +51,6 @@ static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
- i40e_mem_atq_ring,
(hw->aq.num_asq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
@@ -78,7 +77,6 @@ static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
- i40e_mem_arq_ring,
(hw->aq.num_arq_entries *
sizeof(struct i40e_aq_desc)),
I40E_ADMINQ_DESC_ALIGNMENT);
@@ -136,7 +134,6 @@ static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
for (i = 0; i < hw->aq.num_arq_entries; i++) {
bi = &hw->aq.arq.r.arq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi,
- i40e_mem_arq_buf,
hw->aq.arq_buf_size,
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
@@ -198,7 +195,6 @@ static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
for (i = 0; i < hw->aq.num_asq_entries; i++) {
bi = &hw->aq.asq.r.asq_bi[i];
ret_code = i40e_allocate_dma_mem(hw, bi,
- i40e_mem_asq_buf,
hw->aq.asq_buf_size,
I40E_ADMINQ_DESC_ALIGNMENT);
if (ret_code)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 267f2e0a21ce..80125bea80a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -4,7 +4,8 @@
#ifndef _I40E_ADMINQ_H_
#define _I40E_ADMINQ_H_
-#include "i40e_osdep.h"
+#include <linux/mutex.h>
+#include "i40e_alloc.h"
#include "i40e_adminq_cmd.h"
#define I40E_ADMINQ_DESC(R, i) \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 3357d65a906b..18a1c3b6d72c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
+#include <linux/bits.h>
+
/* This header file defines the i40e Admin Queue commands and is shared between
* i40e Firmware and Software.
*
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index a6c9a9e343d1..e0dde326255d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -4,25 +4,25 @@
#ifndef _I40E_ALLOC_H_
#define _I40E_ALLOC_H_
+#include <linux/types.h>
+
struct i40e_hw;
-/* Memory allocation types */
-enum i40e_memory_type {
- i40e_mem_arq_buf = 0, /* ARQ indirect command buffer */
- i40e_mem_asq_buf = 1,
- i40e_mem_atq_buf = 2, /* ATQ indirect command buffer */
- i40e_mem_arq_ring = 3, /* ARQ descriptor ring */
- i40e_mem_atq_ring = 4, /* ATQ descriptor ring */
- i40e_mem_pd = 5, /* Page Descriptor */
- i40e_mem_bp = 6, /* Backing Page - 4KB */
- i40e_mem_bp_jumbo = 7, /* Backing Page - > 4KB */
- i40e_mem_reserved
+/* memory allocation tracking */
+struct i40e_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ u32 size;
+};
+
+struct i40e_virt_mem {
+ void *va;
+ u32 size;
};
/* prototype for functions used for dynamic memory allocation */
int i40e_allocate_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
u64 size, u32 alignment);
int i40e_free_dma_mem(struct i40e_hw *hw,
struct i40e_dma_mem *mem);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 639c5a1ca853..306758428aef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -6,7 +6,6 @@
#include <linux/net/intel/i40e_client.h>
#include "i40e.h"
-#include "i40e_prototype.h"
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 1b493854f522..d7e24d661724 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,11 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */
-#include "i40e.h"
-#include "i40e_type.h"
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
#include <linux/avf/virtchnl.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include "i40e_adminq_cmd.h"
+#include "i40e_devids.h"
+#include "i40e_prototype.h"
+#include "i40e_register.h"
/**
* i40e_set_mac_type - Sets MAC type
@@ -818,62 +821,72 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
}
/**
- * i40e_read_pba_string - Reads part number string from EEPROM
+ * i40e_get_pba_string - Reads part number string from EEPROM
* @hw: pointer to hardware structure
- * @pba_num: stores the part number string from the EEPROM
- * @pba_num_size: part number string buffer length
*
- * Reads the part number string from the EEPROM.
+ * Reads the part number string from the EEPROM, stores it in a
+ * newly allocated buffer, and saves the resulting pointer in the
+ * i40e_hw->pba_id field.
**/
-int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+void i40e_get_pba_string(struct i40e_hw *hw)
{
+#define I40E_NVM_PBA_FLAGS_BLK_PRESENT 0xFAFA
u16 pba_word = 0;
u16 pba_size = 0;
u16 pba_ptr = 0;
- int status = 0;
- u16 i = 0;
+ int status;
+ char *ptr;
+ u16 i;
status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
- if (status || (pba_word != 0xFAFA)) {
- hw_dbg(hw, "Failed to read PBA flags or flag is invalid.\n");
- return status;
+ if (status) {
+ hw_dbg(hw, "Failed to read PBA flags.\n");
+ return;
+ }
+ if (pba_word != I40E_NVM_PBA_FLAGS_BLK_PRESENT) {
+ hw_dbg(hw, "PBA block is not present.\n");
+ return;
}
status = i40e_read_nvm_word(hw, I40E_SR_PBA_BLOCK_PTR, &pba_ptr);
if (status) {
hw_dbg(hw, "Failed to read PBA Block pointer.\n");
- return status;
+ return;
}
status = i40e_read_nvm_word(hw, pba_ptr, &pba_size);
if (status) {
hw_dbg(hw, "Failed to read PBA Block size.\n");
- return status;
+ return;
}
/* Subtract one to get PBA word count (PBA Size word is included in
- * total size)
+ * total size) and advance pointer to first PBA word.
*/
pba_size--;
- if (pba_num_size < (((u32)pba_size * 2) + 1)) {
- hw_dbg(hw, "Buffer too small for PBA data.\n");
- return -EINVAL;
+ pba_ptr++;
+ if (!pba_size) {
+ hw_dbg(hw, "PBA ID is empty.\n");
+ return;
}
+ ptr = devm_kzalloc(i40e_hw_to_dev(hw), pba_size * 2 + 1, GFP_KERNEL);
+ if (!ptr)
+ return;
+ hw->pba_id = ptr;
+
for (i = 0; i < pba_size; i++) {
- status = i40e_read_nvm_word(hw, (pba_ptr + 1) + i, &pba_word);
+ status = i40e_read_nvm_word(hw, pba_ptr + i, &pba_word);
if (status) {
hw_dbg(hw, "Failed to read PBA Block word %d.\n", i);
- return status;
+ devm_kfree(i40e_hw_to_dev(hw), hw->pba_id);
+ hw->pba_id = NULL;
+ return;
}
- pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
- pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ *ptr++ = (pba_word >> 8) & 0xFF;
+ *ptr++ = pba_word & 0xFF;
}
- pba_num[(pba_size * 2)] = '\0';
-
- return status;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index f81e744c0fb3..68602fc375f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */
-#include "i40e_adminq.h"
-#include "i40e_prototype.h"
+#include "i40e_alloc.h"
#include "i40e_dcb.h"
+#include "i40e_prototype.h"
/**
* i40e_get_dcbx_status
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 195421d863ab..077a95dad32c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -2,8 +2,8 @@
/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifdef CONFIG_I40E_DCB
-#include "i40e.h"
#include <net/dcbnl.h>
+#include "i40e.h"
#define I40E_DCBNL_STATUS_SUCCESS 0
#define I40E_DCBNL_STATUS_ERROR 1
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 0e72abd178ae..cf25bfc5dc3f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -1,9 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/firmware.h>
#include "i40e.h"
-#include <linux/firmware.h>
+#define I40_DDP_FLASH_REGION 100
+#define I40E_PROFILE_INFO_SIZE 48
+#define I40E_MAX_PROFILE_NUM 16
+#define I40E_PROFILE_LIST_SIZE \
+ (I40E_PROFILE_INFO_SIZE * I40E_MAX_PROFILE_NUM + 4)
+#define I40E_DDP_PROFILE_PATH "intel/i40e/ddp/"
+#define I40E_DDP_PROFILE_NAME_MAX 64
+
+struct i40e_ddp_profile_list {
+ u32 p_count;
+ struct i40e_profile_info p_info[];
+};
+
+struct i40e_ddp_old_profile_list {
+ struct list_head list;
+ size_t old_ddp_size;
+ u8 old_ddp_buf[];
+};
/**
* i40e_ddp_profiles_eq - checks if DDP profiles are the equivalent
@@ -261,8 +279,8 @@ static bool i40e_ddp_is_pkg_hdr_valid(struct net_device *netdev,
* Checks correctness and loads DDP profile to the NIC. The function is
* also used for rolling back previously loaded profile.
**/
-int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
- bool is_add)
+static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
+ bool is_add)
{
u8 profile_info_sec[sizeof(struct i40e_profile_section_header) +
sizeof(struct i40e_profile_info)];
@@ -438,10 +456,9 @@ int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash)
char profile_name[sizeof(I40E_DDP_PROFILE_PATH)
+ I40E_DDP_PROFILE_NAME_MAX];
- profile_name[sizeof(profile_name) - 1] = 0;
- strncpy(profile_name, I40E_DDP_PROFILE_PATH,
- sizeof(profile_name) - 1);
- strncat(profile_name, flash->data, I40E_DDP_PROFILE_NAME_MAX);
+ scnprintf(profile_name, sizeof(profile_name), "%s%s",
+ I40E_DDP_PROFILE_PATH, flash->data);
+
/* Load DDP recipe. */
status = request_firmware(&ddp_config, profile_name,
&netdev->dev);
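
scnprintf() replaces the strncpy()/strncat() pair used to build the firmware path: the write is bounded by the buffer size and the result is always NUL-terminated. A minimal sketch with a hypothetical helper name:

#include <linux/kernel.h>

static void foo_build_ddp_path(char *buf, size_t len, const char *ddp_name)
{
	/* bounded, NUL-terminated concatenation of path prefix + name */
	scnprintf(buf, len, "%s%s", "intel/i40e/ddp/", ddp_name);
}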
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debug.h b/drivers/net/ethernet/intel/i40e/i40e_debug.h
new file mode 100644
index 000000000000..27ebc72d8bfe
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_debug.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+
+#ifndef _I40E_DEBUG_H_
+#define _I40E_DEBUG_H_
+
+#include <linux/dev_printk.h>
+
+/* debug masks - set these bits in hw->debug_mask to control output */
+enum i40e_debug_mask {
+ I40E_DEBUG_INIT = 0x00000001,
+ I40E_DEBUG_RELEASE = 0x00000002,
+
+ I40E_DEBUG_LINK = 0x00000010,
+ I40E_DEBUG_PHY = 0x00000020,
+ I40E_DEBUG_HMC = 0x00000040,
+ I40E_DEBUG_NVM = 0x00000080,
+ I40E_DEBUG_LAN = 0x00000100,
+ I40E_DEBUG_FLOW = 0x00000200,
+ I40E_DEBUG_DCB = 0x00000400,
+ I40E_DEBUG_DIAG = 0x00000800,
+ I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_PACKAGE = 0x00002000,
+ I40E_DEBUG_IWARP = 0x00F00000,
+ I40E_DEBUG_AQ_MESSAGE = 0x01000000,
+ I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
+ I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
+ I40E_DEBUG_AQ_COMMAND = 0x06000000,
+ I40E_DEBUG_AQ = 0x0F000000,
+
+ I40E_DEBUG_USER = 0xF0000000,
+
+ I40E_DEBUG_ALL = 0xFFFFFFFF
+};
+
+struct i40e_hw;
+struct device *i40e_hw_to_dev(struct i40e_hw *hw);
+
+#define hw_dbg(hw, S, A...) dev_dbg(i40e_hw_to_dev(hw), S, ##A)
+
+#define i40e_debug(h, m, s, ...) \
+do { \
+ if (((m) & (h)->debug_mask)) \
+ dev_info(i40e_hw_to_dev(hw), s, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _I40E_DEBUG_H_ */
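
Hypothetical call sites for the helpers above (offset and opcode are placeholder variables; note that the i40e_debug() body expands to i40e_hw_to_dev(hw), so callers pass a local literally named hw):

	/* gated by hw->debug_mask; emitted only when the NVM bit is set */
	i40e_debug(hw, I40E_DEBUG_NVM, "read word %u from shadow RAM\n", offset);

	/* unconditional dev_dbg() through the hw -> device mapping */
	hw_dbg(hw, "admin queue timed out on opcode 0x%04x\n", opcode);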
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 1a497cb07710..999c9708def5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -5,8 +5,9 @@
#include <linux/fs.h>
#include <linux/debugfs.h>
-
+#include <linux/if_bridge.h>
#include "i40e.h"
+#include "i40e_virtchnl_pf.h"
static struct dentry *i40e_dbg_root;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.c b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
new file mode 100644
index 000000000000..74bc111b4849
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2023 Intel Corporation. */
+
+#include <net/devlink.h>
+#include "i40e.h"
+#include "i40e_devlink.h"
+
+static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len)
+{
+ u8 dsn[8];
+
+ put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
+
+ snprintf(buf, len, "%8phD", dsn);
+}
+
+static void i40e_info_fw_mgmt(struct i40e_hw *hw, char *buf, size_t len)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ snprintf(buf, len, "%u.%u", aq->fw_maj_ver, aq->fw_min_ver);
+}
+
+static void i40e_info_fw_mgmt_build(struct i40e_hw *hw, char *buf, size_t len)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ snprintf(buf, len, "%05d", aq->fw_build);
+}
+
+static void i40e_info_fw_api(struct i40e_hw *hw, char *buf, size_t len)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ snprintf(buf, len, "%u.%u", aq->api_maj_ver, aq->api_min_ver);
+}
+
+static void i40e_info_pba(struct i40e_hw *hw, char *buf, size_t len)
+{
+ buf[0] = '\0';
+ if (hw->pba_id)
+ strscpy(buf, hw->pba_id, len);
+}
+
+enum i40e_devlink_version_type {
+ I40E_DL_VERSION_FIXED,
+ I40E_DL_VERSION_RUNNING,
+};
+
+static int i40e_devlink_info_put(struct devlink_info_req *req,
+ enum i40e_devlink_version_type type,
+ const char *key, const char *value)
+{
+ if (!strlen(value))
+ return 0;
+
+ switch (type) {
+ case I40E_DL_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, value);
+ case I40E_DL_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, value);
+ }
+ return 0;
+}
+
+static int i40e_devlink_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct i40e_pf *pf = devlink_priv(dl);
+ struct i40e_hw *hw = &pf->hw;
+ char buf[32];
+ int err;
+
+ i40e_info_get_dsn(pf, buf, sizeof(buf));
+ err = devlink_info_serial_number_put(req, buf);
+ if (err)
+ return err;
+
+ i40e_info_fw_mgmt(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf);
+ if (err)
+ return err;
+
+ i40e_info_fw_mgmt_build(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING,
+ "fw.mgmt.build", buf);
+ if (err)
+ return err;
+
+ i40e_info_fw_api(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ buf);
+ if (err)
+ return err;
+
+ i40e_info_nvm_ver(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING,
+ "fw.psid.api", buf);
+ if (err)
+ return err;
+
+ i40e_info_eetrack(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ buf);
+ if (err)
+ return err;
+
+ i40e_info_civd_ver(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, buf);
+ if (err)
+ return err;
+
+ i40e_info_pba(hw, buf, sizeof(buf));
+ err = i40e_devlink_info_put(req, I40E_DL_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, buf);
+
+ return err;
+}
+
+static const struct devlink_ops i40e_devlink_ops = {
+ .info_get = i40e_devlink_info_get,
+};
+
+/**
+ * i40e_alloc_pf - Allocate devlink and return i40e_pf structure pointer
+ * @dev: the device to allocate for
+ *
+ * Allocate a devlink instance for this device and return the private
+ * area as the i40e_pf structure.
+ **/
+struct i40e_pf *i40e_alloc_pf(struct device *dev)
+{
+ struct devlink *devlink;
+
+ devlink = devlink_alloc(&i40e_devlink_ops, sizeof(struct i40e_pf), dev);
+ if (!devlink)
+ return NULL;
+
+ return devlink_priv(devlink);
+}
+
+/**
+ * i40e_free_pf - Free i40e_pf structure and associated devlink
+ * @pf: the PF structure
+ *
+ * Free i40e_pf structure and devlink allocated by devlink_alloc.
+ **/
+void i40e_free_pf(struct i40e_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+
+ devlink_free(devlink);
+}
+
+/**
+ * i40e_devlink_register - Register devlink interface for this PF
+ * @pf: the PF to register the devlink for.
+ *
+ * Register the devlink instance associated with this physical function.
+ **/
+void i40e_devlink_register(struct i40e_pf *pf)
+{
+ devlink_register(priv_to_devlink(pf));
+}
+
+/**
+ * i40e_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
+ *
+ * Releases resources used by devlink and cleans up associated memory.
+ **/
+void i40e_devlink_unregister(struct i40e_pf *pf)
+{
+ devlink_unregister(priv_to_devlink(pf));
+}
+
+/**
+ * i40e_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to create a devlink port for
+ * @ppid: struct with switch id information
+ */
+static void i40e_devlink_set_switch_id(struct i40e_pf *pf,
+ struct netdev_phys_item_id *ppid)
+{
+ u64 id = pci_get_dsn(pf->pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * i40e_devlink_create_port - Create a devlink port for this PF
+ * @pf: the PF to create a port for
+ *
+ * Create and register a devlink_port for this PF. Note that although each
+ * physical function is connected to a separate devlink instance, the port
+ * will still be numbered according to the physical function id.
+ *
+ * Return: zero on success or an error code on failure.
+ **/
+int i40e_devlink_create_port(struct i40e_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_port_attrs attrs = {};
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.pf_id;
+ i40e_devlink_set_switch_id(pf, &attrs.switch_id);
+ devlink_port_attrs_set(&pf->devlink_port, &attrs);
+ err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ if (err) {
+ dev_err(dev, "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_devlink_destroy_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ **/
+void i40e_devlink_destroy_port(struct i40e_pf *pf)
+{
+ devlink_port_type_clear(&pf->devlink_port);
+ devlink_port_unregister(&pf->devlink_port);
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devlink.h b/drivers/net/ethernet/intel/i40e/i40e_devlink.h
new file mode 100644
index 000000000000..469fb3d2ee25
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_devlink.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2023, Intel Corporation. */
+
+#ifndef _I40E_DEVLINK_H_
+#define _I40E_DEVLINK_H_
+
+#include <linux/device.h>
+
+struct i40e_pf;
+
+struct i40e_pf *i40e_alloc_pf(struct device *dev);
+void i40e_free_pf(struct i40e_pf *pf);
+void i40e_devlink_register(struct i40e_pf *pf);
+void i40e_devlink_unregister(struct i40e_pf *pf);
+int i40e_devlink_create_port(struct i40e_pf *pf);
+void i40e_devlink_destroy_port(struct i40e_pf *pf);
+
+#endif /* _I40E_DEVLINK_H_ */
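
A rough probe-context fragment showing how the helpers declared above fit together (ordering is illustrative only, not taken from i40e_main.c):

	struct i40e_pf *pf;
	int err;

	pf = i40e_alloc_pf(&pdev->dev);
	if (!pf)
		return -ENOMEM;

	/* ... hardware and netdev setup ... */

	err = i40e_devlink_create_port(pf);
	if (err)
		goto err_port;
	i40e_devlink_register(pf);

	/* the teardown path mirrors the calls in reverse:
	 * i40e_devlink_unregister(), i40e_devlink_destroy_port(),
	 * i40e_free_pf() */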
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index c3ce5f35211f..ece3a6b9a5c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -4,7 +4,10 @@
#ifndef _I40E_DIAG_H_
#define _I40E_DIAG_H_
-#include "i40e_type.h"
+#include "i40e_adminq_cmd.h"
+
+/* forward-declare the HW struct for the compiler */
+struct i40e_hw;
enum i40e_lb_mode {
I40E_LB_MODE_NONE = 0x0,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index bd1321bf7e26..fd7163128c4d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3,9 +3,10 @@
/* ethtool support for i40e */
-#include "i40e.h"
+#include "i40e_devids.h"
#include "i40e_diag.h"
#include "i40e_txrx_common.h"
+#include "i40e_virtchnl_pf.h"
/* ethtool statistics helpers */
@@ -245,6 +246,7 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_errors),
I40E_NETDEV_STAT(tx_errors),
I40E_NETDEV_STAT(rx_dropped),
+ I40E_NETDEV_STAT(rx_missed_errors),
I40E_NETDEV_STAT(tx_dropped),
I40E_NETDEV_STAT(collisions),
I40E_NETDEV_STAT(rx_length_errors),
@@ -321,7 +323,7 @@ static const struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("port.rx_broadcast", stats.eth.rx_broadcast),
I40E_PF_STAT("port.tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("port.tx_errors", stats.eth.tx_errors),
- I40E_PF_STAT("port.rx_dropped", stats.eth.rx_discards),
+ I40E_PF_STAT("port.rx_discards", stats.eth.rx_discards),
I40E_PF_STAT("port.tx_dropped_link_down", stats.tx_dropped_link_down),
I40E_PF_STAT("port.rx_crc_errors", stats.crc_errors),
I40E_PF_STAT("port.illegal_bytes", stats.illegal_bytes),
@@ -2004,8 +2006,8 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
- sizeof(drvinfo->fw_version));
+ i40e_nvm_version_str(&pf->hw, drvinfo->fw_version,
+ sizeof(drvinfo->fw_version));
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
@@ -2512,11 +2514,13 @@ static void i40e_get_priv_flag_strings(struct net_device *netdev, u8 *data)
u8 *p = data;
for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, i40e_gstrings_priv_flags[i].flag_string);
+ ethtool_sprintf(&p, "%s",
+ i40e_gstrings_priv_flags[i].flag_string);
if (pf->hw.pf_id != 0)
return;
for (i = 0; i < I40E_GL_PRIV_FLAGS_STR_LEN; i++)
- ethtool_sprintf(&p, i40e_gl_gstrings_priv_flags[i].flag_string);
+ ethtool_sprintf(&p, "%s",
+ i40e_gl_gstrings_priv_flags[i].flag_string);
}
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
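
Passing the flag string as an argument to a "%s" format, instead of using it as the format itself, keeps a literal '%' in a flag name from being parsed as a conversion specifier. A minimal sketch with hypothetical foo_* names:

#include <linux/ethtool.h>

static void foo_get_priv_flag_strings(struct net_device *netdev, u8 *data)
{
	u8 *p = data;
	unsigned int i;

	for (i = 0; i < FOO_PRIV_FLAGS_LEN; i++)
		ethtool_sprintf(&p, "%s", foo_priv_flags[i].flag_string);
}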
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 96ee63aca7a1..1742624ca62e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -1,10 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40e.h"
-#include "i40e_osdep.h"
-#include "i40e_register.h"
#include "i40e_alloc.h"
+#include "i40e_debug.h"
#include "i40e_hmc.h"
#include "i40e_type.h"
@@ -22,7 +20,6 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw,
enum i40e_sd_entry_type type,
u64 direct_mode_sz)
{
- enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
@@ -43,16 +40,13 @@ int i40e_add_sd_table_entry(struct i40e_hw *hw,
sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
if (!sd_entry->valid) {
- if (I40E_SD_TYPE_PAGED == type) {
- mem_type = i40e_mem_pd;
+ if (type == I40E_SD_TYPE_PAGED)
alloc_len = I40E_HMC_PAGED_BP_SIZE;
- } else {
- mem_type = i40e_mem_bp_jumbo;
+ else
alloc_len = direct_mode_sz;
- }
/* allocate a 4K pd page or 2M backing page */
- ret_code = i40e_allocate_dma_mem(hw, &mem, mem_type, alloc_len,
+ ret_code = i40e_allocate_dma_mem(hw, &mem, alloc_len,
I40E_HMC_PD_BP_BUF_ALIGNMENT);
if (ret_code)
goto exit;
@@ -140,7 +134,7 @@ int i40e_add_pd_table_entry(struct i40e_hw *hw,
page = rsrc_pg;
} else {
/* allocate a 4K backing page */
- ret_code = i40e_allocate_dma_mem(hw, page, i40e_mem_bp,
+ ret_code = i40e_allocate_dma_mem(hw, page,
I40E_HMC_PAGED_BP_SIZE,
I40E_HMC_PD_BP_BUF_ALIGNMENT);
if (ret_code)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 9960da07a573..480e3a883cc7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -4,6 +4,10 @@
#ifndef _I40E_HMC_H_
#define _I40E_HMC_H_
+#include "i40e_alloc.h"
+#include "i40e_io.h"
+#include "i40e_register.h"
+
#define I40E_HMC_MAX_BP_COUNT 512
/* forward-declare the HW struct for the compiler */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_io.h b/drivers/net/ethernet/intel/i40e/i40e_io.h
new file mode 100644
index 000000000000..2a2ed9a1d476
--- /dev/null
+++ b/drivers/net/ethernet/intel/i40e/i40e_io.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2023 Intel Corporation. */
+
+#ifndef _I40E_IO_H_
+#define _I40E_IO_H_
+
+/* get readq/writeq support for 32 bit kernels, use the low-first version */
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
+
+#endif /* _I40E_IO_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index 474365bf0648..beaaf5c309d5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -1,13 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40e.h"
-#include "i40e_osdep.h"
-#include "i40e_register.h"
-#include "i40e_type.h"
-#include "i40e_hmc.h"
+#include "i40e_alloc.h"
+#include "i40e_debug.h"
#include "i40e_lan_hmc.h"
-#include "i40e_prototype.h"
+#include "i40e_type.h"
/* lan specific interface functions */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 9f960404c2b3..305a276953b0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -4,6 +4,8 @@
#ifndef _I40E_LAN_HMC_H_
#define _I40E_LAN_HMC_H_
+#include "i40e_hmc.h"
+
/* forward-declare the HW struct for the compiler */
struct i40e_hw;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index de7fd43dc11c..3157d14d9b12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,19 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */
-#include <linux/etherdevice.h>
-#include <linux/of_net.h>
-#include <linux/pci.h>
-#include <linux/bpf.h>
#include <generated/utsrelease.h>
#include <linux/crash_dump.h>
+#include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
+#include <linux/module.h>
+#include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
/* Local includes */
#include "i40e.h"
+#include "i40e_devids.h"
#include "i40e_diag.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_virtchnl_pf.h"
#include "i40e_xsk.h"
-#include <net/udp_tunnel.h>
-#include <net/xdp_sock_drv.h>
+
/* All i40e tracepoints are defined by the include below, which
* must be included exactly once across the whole kernel with
* CREATE_TRACE_POINTS defined
@@ -120,16 +123,27 @@ static void netdev_hw_addr_refcnt(struct i40e_mac_filter *f,
}
/**
- * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
+ * i40e_hw_to_dev - get device pointer from the hardware structure
+ * @hw: pointer to the device HW structure
+ **/
+struct device *i40e_hw_to_dev(struct i40e_hw *hw)
+{
+ struct i40e_pf *pf = i40e_hw_to_pf(hw);
+
+ return &pf->pdev->dev;
+}
+
+/**
+ * i40e_allocate_dma_mem - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
* @alignment: what to align the allocation to
**/
-int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
- u64 size, u32 alignment)
+int i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
+ u64 size, u32 alignment)
{
- struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+ struct i40e_pf *pf = i40e_hw_to_pf(hw);
mem->size = ALIGN(size, alignment);
mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
@@ -141,13 +155,13 @@ int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
}
/**
- * i40e_free_dma_mem_d - OS specific memory free for shared code
+ * i40e_free_dma_mem - OS specific memory free for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
+int i40e_free_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
- struct i40e_pf *pf = (struct i40e_pf *)hw->back;
+ struct i40e_pf *pf = i40e_hw_to_pf(hw);
dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
mem->va = NULL;
@@ -158,13 +172,13 @@ int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
}
/**
- * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
+ * i40e_allocate_virt_mem - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
* @size: size of memory requested
**/
-int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
- u32 size)
+int i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem,
+ u32 size)
{
mem->size = size;
mem->va = kzalloc(size, GFP_KERNEL);
@@ -176,11 +190,11 @@ int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
}
/**
- * i40e_free_virt_mem_d - OS specific memory free for shared code
+ * i40e_free_virt_mem - OS specific memory free for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to free
**/
-int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
+int i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
/* it's ok to kfree a NULL pointer */
kfree(mem->va);
@@ -489,6 +503,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev,
stats->tx_dropped = vsi_stats->tx_dropped;
stats->rx_errors = vsi_stats->rx_errors;
stats->rx_dropped = vsi_stats->rx_dropped;
+ stats->rx_missed_errors = vsi_stats->rx_missed_errors;
stats->rx_crc_errors = vsi_stats->rx_crc_errors;
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
@@ -680,17 +695,13 @@ i40e_stats_update_rx_discards(struct i40e_vsi *vsi, struct i40e_hw *hw,
struct i40e_eth_stats *stat_offset,
struct i40e_eth_stats *stat)
{
- u64 rx_rdpc, rx_rxerr;
-
i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx), offset_loaded,
- &stat_offset->rx_discards, &rx_rdpc);
+ &stat_offset->rx_discards, &stat->rx_discards);
i40e_stat_update64(hw,
I40E_GL_RXERR1H(i40e_compute_pci_to_hw_id(vsi, hw)),
I40E_GL_RXERR1L(i40e_compute_pci_to_hw_id(vsi, hw)),
offset_loaded, &stat_offset->rx_discards_other,
- &rx_rxerr);
-
- stat->rx_discards = rx_rdpc + rx_rxerr;
+ &stat->rx_discards_other);
}
/**
@@ -712,9 +723,6 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_errors, &es->tx_errors);
- i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_discards, &es->rx_discards);
i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_unknown_protocol, &es->rx_unknown_protocol);
@@ -971,8 +979,10 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
ns->tx_errors = es->tx_errors;
ons->multicast = oes->rx_multicast;
ns->multicast = es->rx_multicast;
- ons->rx_dropped = oes->rx_discards;
- ns->rx_dropped = es->rx_discards;
+ ons->rx_dropped = oes->rx_discards_other;
+ ns->rx_dropped = es->rx_discards_other;
+ ons->rx_missed_errors = oes->rx_discards;
+ ns->rx_missed_errors = es->rx_discards;
ons->tx_dropped = oes->tx_discards;
ns->tx_dropped = es->tx_discards;
@@ -10788,7 +10798,9 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
&gen_snap);
i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
&release);
- hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
+ hw->nvm.oem_ver =
+ FIELD_PREP(I40E_OEM_GEN_MASK | I40E_OEM_SNAP_MASK, gen_snap) |
+ FIELD_PREP(I40E_OEM_RELEASE_MASK, release);
hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
@@ -14201,6 +14213,8 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
}
set_bit(__I40E_VSI_RELEASING, vsi->state);
uplink_seid = vsi->uplink_seid;
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
if (vsi->type != I40E_VSI_SRIOV) {
if (vsi->netdev_registered) {
vsi->netdev_registered = false;
@@ -14388,6 +14402,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
err_rings:
i40e_vsi_free_q_vectors(vsi);
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
if (vsi->netdev_registered) {
vsi->netdev_registered = false;
unregister_netdev(vsi->netdev);
@@ -14534,9 +14550,15 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
ret = i40e_netif_set_realnum_tx_rx_queues(vsi);
if (ret)
goto err_netdev;
+ if (vsi->type == I40E_VSI_MAIN) {
+ ret = i40e_devlink_create_port(pf);
+ if (ret)
+ goto err_netdev;
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+ }
ret = register_netdev(vsi->netdev);
if (ret)
- goto err_netdev;
+ goto err_dl_port;
vsi->netdev_registered = true;
netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
@@ -14579,6 +14601,9 @@ err_msix:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
+err_dl_port:
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_devlink_destroy_port(pf);
err_netdev:
i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
@@ -15609,7 +15634,7 @@ err_switch_setup:
iounmap(hw->hw_addr);
pci_release_mem_regions(pf->pdev);
pci_disable_device(pf->pdev);
- kfree(pf);
+ i40e_free_pf(pf);
return err;
}
@@ -15623,10 +15648,10 @@ err_switch_setup:
**/
static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw)
{
- struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev;
+ struct i40e_pf *pf = i40e_hw_to_pf(hw);
- hw->subsystem_device_id = pdev->subsystem_device ?
- pdev->subsystem_device :
+ hw->subsystem_device_id = pf->pdev->subsystem_device ?
+ pf->pdev->subsystem_device :
(ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX);
}
@@ -15651,6 +15676,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_hw *hw;
static u16 pfs_found;
u16 wol_nvm_bits;
+ char nvm_ver[32];
u16 link_status;
#ifdef CONFIG_I40E_DCB
int status;
@@ -15686,7 +15712,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* the Admin Queue structures and then querying for the
* device's current profile information.
*/
- pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ pf = i40e_alloc_pf(&pdev->dev);
if (!pf) {
err = -ENOMEM;
goto err_pf_alloc;
@@ -15696,7 +15722,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
set_bit(__I40E_DOWN, pf->state);
hw = &pf->hw;
- hw->back = pf;
pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
I40E_MAX_CSR_SPACE);
@@ -15821,13 +15846,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pf_reset;
}
i40e_get_oem_version(hw);
+ i40e_get_pba_string(hw);
/* provide nvm, fw, api versions, vendor:device id, subsys vendor:device id */
+ i40e_nvm_version_str(hw, nvm_ver, sizeof(nvm_ver));
dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n",
hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
- hw->aq.api_maj_ver, hw->aq.api_min_ver,
- i40e_nvm_version_str(hw), hw->vendor_id, hw->device_id,
- hw->subsystem_vendor_id, hw->subsystem_device_id);
+ hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver,
+ hw->vendor_id, hw->device_id, hw->subsystem_vendor_id,
+ hw->subsystem_device_id);
if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
@@ -16214,6 +16241,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* print a string summarizing features */
i40e_print_features(pf);
+ i40e_devlink_register(pf);
+
return 0;
/* Unwind what we've done if something failed in the setup */
@@ -16234,7 +16263,7 @@ err_adminq_setup:
err_pf_reset:
iounmap(hw->hw_addr);
err_ioremap:
- kfree(pf);
+ i40e_free_pf(pf);
err_pf_alloc:
pci_release_mem_regions(pdev);
err_pci_reg:
@@ -16259,6 +16288,8 @@ static void i40e_remove(struct pci_dev *pdev)
int ret_code;
int i;
+ i40e_devlink_unregister(pf);
+
i40e_dbg_pf_exit(pf);
i40e_ptp_stop(pf);
@@ -16320,11 +16351,15 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_switch_branch_release(pf->veb[i]);
}
- /* Now we can shutdown the PF's VSI, just before we kill
+ /* Now we can shut down the PF's VSIs, just before we kill
* adminq and hmc.
*/
- if (pf->vsi[pf->lan_vsi])
- i40e_vsi_release(pf->vsi[pf->lan_vsi]);
+ for (i = pf->num_alloc_vsi; i--;)
+ if (pf->vsi[i]) {
+ i40e_vsi_close(pf->vsi[i]);
+ i40e_vsi_release(pf->vsi[i]);
+ pf->vsi[i] = NULL;
+ }
i40e_cloud_filter_exit(pf);
@@ -16380,7 +16415,7 @@ unmap:
kfree(pf->vsi);
iounmap(hw->hw_addr);
- kfree(pf);
+ i40e_free_pf(pf);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 07a46adeab38..77cdbfc19d47 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/delay.h>
+#include "i40e_alloc.h"
#include "i40e_prototype.h"
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
deleted file mode 100644
index 2bd4de03dafa..000000000000
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_OSDEP_H_
-#define _I40E_OSDEP_H_
-
-#include <linux/types.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/tcp.h>
-#include <linux/pci.h>
-#include <linux/highuid.h>
-
-/* get readq/writeq support for 32 bit kernels, use the low-first version */
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-/* File to be the magic between shared code and
- * actual OS primitives
- */
-
-#define hw_dbg(hw, S, A...) \
-do { \
- dev_dbg(&((struct i40e_pf *)hw->back)->pdev->dev, S, ##A); \
-} while (0)
-
-#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
-#define rd32(a, reg) readl((a)->hw_addr + (reg))
-
-#define rd64(a, reg) readq((a)->hw_addr + (reg))
-#define i40e_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT)
-
-/* memory allocation tracking */
-struct i40e_dma_mem {
- void *va;
- dma_addr_t pa;
- u32 size;
-};
-
-#define i40e_allocate_dma_mem(h, m, unused, s, a) \
- i40e_allocate_dma_mem_d(h, m, s, a)
-#define i40e_free_dma_mem(h, m) i40e_free_dma_mem_d(h, m)
-
-struct i40e_virt_mem {
- void *va;
- u32 size;
-};
-
-#define i40e_allocate_virt_mem(h, m, s) i40e_allocate_virt_mem_d(h, m, s)
-#define i40e_free_virt_mem(h, m) i40e_free_virt_mem_d(h, m)
-
-#define i40e_debug(h, m, s, ...) \
-do { \
- if (((m) & (h)->debug_mask)) \
- pr_info("i40e %02x:%02x.%x " s, \
- (h)->bus.bus_id, (h)->bus.device, \
- (h)->bus.func, ##__VA_ARGS__); \
-} while (0)
-
-#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 3eeee224f1fb..001162042050 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -4,9 +4,10 @@
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
-#include "i40e_type.h"
-#include "i40e_alloc.h"
+#include <linux/ethtool.h>
#include <linux/avf/virtchnl.h>
+#include "i40e_debug.h"
+#include "i40e_type.h"
/* Prototypes for shared code functions that are not in
* the standard function pointer structures. These are
@@ -340,8 +341,7 @@ i40e_aq_configure_partition_bw(struct i40e_hw *hw,
struct i40e_aqc_configure_partition_bw_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size);
+void i40e_get_pba_string(struct i40e_hw *hw);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
int i40e_init_nvm(struct i40e_hw *hw);
@@ -497,4 +497,8 @@ int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
+
+/* i40e_ddp */
+int i40e_ddp_flash(struct net_device *netdev, struct ethtool_flash *flash);
+
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 8a26811140b4..20b77398f060 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include "i40e.h"
#include <linux/ptp_classify.h>
#include <linux/posix-clock.h>
+#include "i40e.h"
+#include "i40e_devids.h"
/* The XL710 timesync is very much like Intel's 82599 design when it comes to
* the fundamental clock design. However, the clock operations are much simpler
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 7339003aa17c..f408fcf23ce8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -4,6 +4,9 @@
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
+
#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
@@ -202,7 +205,9 @@
#define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
#define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
#define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
+#define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_GLGEN_MSCA_STCODE_SHIFT 28
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index b047c587629b..dd410b15000f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1,14 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
-#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <linux/prefetch.h>
+#include <linux/sctp.h>
#include <net/mpls.h>
#include <net/xdp.h>
-#include "i40e.h"
-#include "i40e_trace.h"
-#include "i40e_prototype.h"
#include "i40e_txrx_common.h"
+#include "i40e_trace.h"
#include "i40e_xsk.h"
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
@@ -2405,7 +2404,7 @@ void i40e_update_rx_stats(struct i40e_ring *rx_ring,
void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
{
if (xdp_res & I40E_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & I40E_XDP_TX) {
struct i40e_ring *xdp_ring =
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 900b0d9ede9f..421fe5675584 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -5,6 +5,7 @@
#define _I40E_TXRX_H_
#include <net/xdp.h>
+#include "i40e_type.h"
/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK 256
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 8c5118c8baaf..e26807fd2123 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -4,6 +4,8 @@
#ifndef I40E_TXRX_COMMON_
#define I40E_TXRX_COMMON_
+#include "i40e.h"
+
int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring);
void i40e_clean_programming_status(struct i40e_ring *rx_ring, u64 qword0_raw,
u64 qword1);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 232131bedc3e..aff6dc6afbe2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -4,15 +4,9 @@
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
-#include "i40e_osdep.h"
-#include "i40e_register.h"
+#include <uapi/linux/if_ether.h>
#include "i40e_adminq.h"
#include "i40e_hmc.h"
-#include "i40e_lan_hmc.h"
-#include "i40e_devids.h"
-
-/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 4
@@ -43,48 +37,14 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2
-/* debug masks - set these bits in hw->debug_mask to control output */
-enum i40e_debug_mask {
- I40E_DEBUG_INIT = 0x00000001,
- I40E_DEBUG_RELEASE = 0x00000002,
-
- I40E_DEBUG_LINK = 0x00000010,
- I40E_DEBUG_PHY = 0x00000020,
- I40E_DEBUG_HMC = 0x00000040,
- I40E_DEBUG_NVM = 0x00000080,
- I40E_DEBUG_LAN = 0x00000100,
- I40E_DEBUG_FLOW = 0x00000200,
- I40E_DEBUG_DCB = 0x00000400,
- I40E_DEBUG_DIAG = 0x00000800,
- I40E_DEBUG_FD = 0x00001000,
- I40E_DEBUG_PACKAGE = 0x00002000,
- I40E_DEBUG_IWARP = 0x00F00000,
- I40E_DEBUG_AQ_MESSAGE = 0x01000000,
- I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
- I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
- I40E_DEBUG_AQ_COMMAND = 0x06000000,
- I40E_DEBUG_AQ = 0x0F000000,
-
- I40E_DEBUG_USER = 0xF0000000,
-
- I40E_DEBUG_ALL = 0xFFFFFFFF
-};
-
-#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_MASK(1, \
- I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_MASK(1, \
- I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_MASK(2, \
- I40E_GLGEN_MSCA_OPCODE_SHIFT)
-
-#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_MASK(0, \
- I40E_GLGEN_MSCA_STCODE_SHIFT)
-#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_MASK(0, \
- I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_MASK(1, \
- I40E_GLGEN_MSCA_OPCODE_SHIFT)
-#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_MASK(3, \
- I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_MDIO_CLAUSE22_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
+#define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(2)
+
+#define I40E_MDIO_CLAUSE45_STCODE_MASK I40E_GLGEN_MSCA_STCODE_MASK
+#define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK I40E_GLGEN_MSCA_OPCODE_MASK(0)
+#define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK I40E_GLGEN_MSCA_OPCODE_MASK(1)
+#define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK I40E_GLGEN_MSCA_OPCODE_MASK(3)
#define I40E_PHY_COM_REG_PAGE 0x1E
#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
@@ -525,7 +485,6 @@ struct i40e_dcbx_config {
/* Port hardware description */
struct i40e_hw {
u8 __iomem *hw_addr;
- void *back;
/* subsystem structs */
struct i40e_phy_info phy;
@@ -534,6 +493,9 @@ struct i40e_hw {
struct i40e_nvm_info nvm;
struct i40e_fc_info fc;
+ /* PBA ID */
+ const char *pba_id;
+
/* pci info */
u16 device_id;
u16 vendor_id;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index d3d6415553ed..08d7edccfb8d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2,6 +2,8 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "i40e.h"
+#include "i40e_lan_hmc.h"
+#include "i40e_virtchnl_pf.h"
/*********************notification routines***********************/
@@ -4916,7 +4918,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
vf_stats->tx_bytes = stats->tx_bytes;
vf_stats->broadcast = stats->rx_broadcast;
vf_stats->multicast = stats->rx_multicast;
- vf_stats->rx_dropped = stats->rx_discards;
+ vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
vf_stats->tx_dropped = stats->tx_discards;
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 895b8feb2567..2ee0f8a23248 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -4,7 +4,9 @@
#ifndef _I40E_VIRTCHNL_PF_H_
#define _I40E_VIRTCHNL_PF_H_
-#include "i40e.h"
+#include <linux/avf/virtchnl.h>
+#include <linux/netdevice.h>
+#include "i40e_type.h"
#define I40E_MAX_VLANID 4095
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 7d991e4d9b89..e99fa854d17f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -2,11 +2,7 @@
/* Copyright(c) 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
-#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
-#include <net/xdp.h>
-
-#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 821df248f8be..ef156fad52f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -4,6 +4,8 @@
#ifndef _I40E_XSK_H_
#define _I40E_XSK_H_
+#include <linux/types.h>
+
/* This value should match the pragma in the loop_unrolled_for
* macro. Why 4? It is strictly empirical. It seems to be a good
* compromise between the advantage of having simultaneous outstanding
@@ -20,7 +22,9 @@
#define loop_unrolled_for for
#endif
+struct i40e_ring;
struct i40e_vsi;
+struct net_device;
struct xsk_buff_pool;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 9c3e45c54d01..2d154a4e2fd7 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -13,4 +13,4 @@ obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
iavf_adv_rss.o \
- iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
+ iavf_txrx.o iavf_common.o iavf_adminq.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index e110ba346185..e7ab89dc883a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -63,7 +63,6 @@ struct iavf_vsi {
DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
int base_vector;
u16 qs_handle;
- void *priv; /* client driver data reference. */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -256,7 +255,6 @@ struct iavf_adapter {
struct work_struct reset_task;
struct work_struct adminq_task;
struct work_struct finish_config;
- struct delayed_work client_task;
wait_queue_head_t down_waitqueue;
wait_queue_head_t reset_waitqueue;
wait_queue_head_t vc_waitqueue;
@@ -265,7 +263,6 @@ struct iavf_adapter {
int num_vlan_filters;
struct list_head mac_filter_list;
struct mutex crit_lock;
- struct mutex client_lock;
/* Lock to protect accesses to MAC and VLAN lists */
spinlock_t mac_vlan_list_lock;
char misc_vector_name[IFNAMSIZ + 9];
@@ -282,10 +279,6 @@ struct iavf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
- int num_rdma_msix;
- int rdma_base_vector;
- u32 client_pending;
- struct iavf_client_instance *cinst;
struct msix_entry *msix_entries;
u32 flags;
@@ -294,12 +287,6 @@ struct iavf_adapter {
#define IAVF_FLAG_RESET_PENDING BIT(4)
#define IAVF_FLAG_RESET_NEEDED BIT(5)
#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
-#define IAVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9)
-#define IAVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
-#define IAVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
-#define IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
-#define IAVF_FLAG_PROMISC_ON BIT(13)
-#define IAVF_FLAG_ALLMULTI_ON BIT(14)
#define IAVF_FLAG_LEGACY_RX BIT(15)
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
@@ -325,10 +312,7 @@ struct iavf_adapter {
#define IAVF_FLAG_AQ_SET_HENA BIT_ULL(12)
#define IAVF_FLAG_AQ_SET_RSS_KEY BIT_ULL(13)
#define IAVF_FLAG_AQ_SET_RSS_LUT BIT_ULL(14)
-#define IAVF_FLAG_AQ_REQUEST_PROMISC BIT_ULL(15)
-#define IAVF_FLAG_AQ_RELEASE_PROMISC BIT_ULL(16)
-#define IAVF_FLAG_AQ_REQUEST_ALLMULTI BIT_ULL(17)
-#define IAVF_FLAG_AQ_RELEASE_ALLMULTI BIT_ULL(18)
+#define IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE BIT_ULL(15)
#define IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT_ULL(19)
#define IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT_ULL(20)
#define IAVF_FLAG_AQ_ENABLE_CHANNELS BIT_ULL(21)
@@ -365,6 +349,12 @@ struct iavf_adapter {
(IAVF_EXTENDED_CAP_SEND_VLAN_V2 | \
IAVF_EXTENDED_CAP_RECV_VLAN_V2)
+ /* Lock to prevent possible clobbering of
+ * current_netdev_promisc_flags
+ */
+ spinlock_t current_netdev_promisc_flags_lock;
+ netdev_features_t current_netdev_promisc_flags;
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -376,7 +366,6 @@ struct iavf_adapter {
unsigned long crit_section;
struct delayed_work watchdog_task;
- bool netdev_registered;
bool link_up;
enum virtchnl_link_speed link_speed;
/* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set
@@ -388,11 +377,6 @@ struct iavf_adapter {
u32 link_speed_mbps;
enum virtchnl_ops current_op;
-#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
- (_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_RDMA : \
- 0)
-#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
#define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_RSS_PF)
@@ -405,6 +389,8 @@ struct iavf_adapter {
VIRTCHNL_VF_OFFLOAD_VLAN)
#define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN_V2)
+#define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
+ VIRTCHNL_VF_OFFLOAD_CRC)
#define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
@@ -458,12 +444,6 @@ struct iavf_adapter {
/* Ethtool Private Flags */
-/* lan device, used by client interface */
-struct iavf_device {
- struct list_head list;
- struct iavf_adapter *vf;
-};
-
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
@@ -551,7 +531,8 @@ void iavf_add_ether_addrs(struct iavf_adapter *adapter);
void iavf_del_ether_addrs(struct iavf_adapter *adapter);
void iavf_add_vlans(struct iavf_adapter *adapter);
void iavf_del_vlans(struct iavf_adapter *adapter);
-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags);
+void iavf_set_promiscuous(struct iavf_adapter *adapter);
+bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter);
void iavf_request_stats(struct iavf_adapter *adapter);
int iavf_request_reset(struct iavf_adapter *adapter);
void iavf_get_hena(struct iavf_adapter *adapter);
@@ -566,11 +547,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
int iavf_config_rss(struct iavf_adapter *adapter);
int iavf_lan_add_device(struct iavf_adapter *adapter);
int iavf_lan_del_device(struct iavf_adapter *adapter);
-void iavf_client_subtask(struct iavf_adapter *adapter);
-void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len);
-void iavf_notify_client_l2_params(struct iavf_vsi *vsi);
-void iavf_notify_client_open(struct iavf_vsi *vsi);
-void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset);
void iavf_enable_channels(struct iavf_adapter *adapter);
void iavf_disable_channels(struct iavf_adapter *adapter);
void iavf_add_cloud_filter(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
deleted file mode 100644
index e6051b6355aa..000000000000
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ /dev/null
@@ -1,578 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include <linux/list.h>
-#include <linux/errno.h>
-
-#include "iavf.h"
-#include "iavf_prototype.h"
-#include "iavf_client.h"
-
-static
-const char iavf_client_interface_version_str[] = IAVF_CLIENT_VERSION_STR;
-static struct iavf_client *vf_registered_client;
-static LIST_HEAD(iavf_devices);
-static DEFINE_MUTEX(iavf_device_mutex);
-
-static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
- struct iavf_client *client,
- u8 *msg, u16 len);
-
-static int iavf_client_setup_qvlist(struct iavf_info *ldev,
- struct iavf_client *client,
- struct iavf_qvlist_info *qvlist_info);
-
-static struct iavf_ops iavf_lan_ops = {
- .virtchnl_send = iavf_client_virtchnl_send,
- .setup_qvlist = iavf_client_setup_qvlist,
-};
-
-/**
- * iavf_client_get_params - retrieve relevant client parameters
- * @vsi: VSI with parameters
- * @params: client param struct
- **/
-static
-void iavf_client_get_params(struct iavf_vsi *vsi, struct iavf_params *params)
-{
- int i;
-
- memset(params, 0, sizeof(struct iavf_params));
- params->mtu = vsi->netdev->mtu;
- params->link_up = vsi->back->link_up;
-
- for (i = 0; i < IAVF_MAX_USER_PRIORITY; i++) {
- params->qos.prio_qos[i].tc = 0;
- params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
- }
-}
-
-/**
- * iavf_notify_client_message - call the client message receive callback
- * @vsi: the VSI associated with this client
- * @msg: message buffer
- * @len: length of message
- *
- * If there is a client to this VSI, call the client
- **/
-void iavf_notify_client_message(struct iavf_vsi *vsi, u8 *msg, u16 len)
-{
- struct iavf_client_instance *cinst;
-
- if (!vsi)
- return;
-
- cinst = vsi->back->cinst;
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->virtchnl_receive) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance virtchnl_receive function\n");
- return;
- }
- cinst->client->ops->virtchnl_receive(&cinst->lan_info, cinst->client,
- msg, len);
-}
-
-/**
- * iavf_notify_client_l2_params - call the client notify callback
- * @vsi: the VSI with l2 param changes
- *
- * If there is a client to this VSI, call the client
- **/
-void iavf_notify_client_l2_params(struct iavf_vsi *vsi)
-{
- struct iavf_client_instance *cinst;
- struct iavf_params params;
-
- if (!vsi)
- return;
-
- cinst = vsi->back->cinst;
-
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance l2_param_change function\n");
- return;
- }
- iavf_client_get_params(vsi, &params);
- cinst->lan_info.params = params;
- cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
- &params);
-}
-
-/**
- * iavf_notify_client_open - call the client open callback
- * @vsi: the VSI with netdev opened
- *
- * If there is a client to this netdev, call the client with open
- **/
-void iavf_notify_client_open(struct iavf_vsi *vsi)
-{
- struct iavf_adapter *adapter = vsi->back;
- struct iavf_client_instance *cinst = adapter->cinst;
- int ret;
-
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->open) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance open function\n");
- return;
- }
- if (!(test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state))) {
- ret = cinst->client->ops->open(&cinst->lan_info, cinst->client);
- if (!ret)
- set_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
- }
-}
-
-/**
- * iavf_client_release_qvlist - send a message to the PF to release rdma qv map
- * @ldev: pointer to L2 context.
- *
- * Return 0 on success or < 0 on error
- **/
-static int iavf_client_release_qvlist(struct iavf_info *ldev)
-{
- struct iavf_adapter *adapter = ldev->vf;
- enum iavf_status err;
-
- if (adapter->aq_required)
- return -EAGAIN;
-
- err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
- IAVF_SUCCESS, NULL, 0, NULL);
-
- if (err)
- dev_err(&adapter->pdev->dev,
- "Unable to send RDMA vector release message to PF, error %d, aq status %d\n",
- err, adapter->hw.aq.asq_last_status);
-
- return err;
-}
-
-/**
- * iavf_notify_client_close - call the client close callback
- * @vsi: the VSI with netdev closed
- * @reset: true when close called due to reset pending
- *
- * If there is a client to this netdev, call the client with close
- **/
-void iavf_notify_client_close(struct iavf_vsi *vsi, bool reset)
-{
- struct iavf_adapter *adapter = vsi->back;
- struct iavf_client_instance *cinst = adapter->cinst;
-
- if (!cinst || !cinst->client || !cinst->client->ops ||
- !cinst->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
- "Cannot locate client instance close function\n");
- return;
- }
- cinst->client->ops->close(&cinst->lan_info, cinst->client, reset);
- iavf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
-}
-
-/**
- * iavf_client_add_instance - add a client instance to the instance list
- * @adapter: pointer to the board struct
- *
- * Returns cinst ptr on success, NULL on failure
- **/
-static struct iavf_client_instance *
-iavf_client_add_instance(struct iavf_adapter *adapter)
-{
- struct iavf_client_instance *cinst = NULL;
- struct iavf_vsi *vsi = &adapter->vsi;
- struct netdev_hw_addr *mac = NULL;
- struct iavf_params params;
-
- if (!vf_registered_client)
- goto out;
-
- if (adapter->cinst) {
- cinst = adapter->cinst;
- goto out;
- }
-
- cinst = kzalloc(sizeof(*cinst), GFP_KERNEL);
- if (!cinst)
- goto out;
-
- cinst->lan_info.vf = (void *)adapter;
- cinst->lan_info.netdev = vsi->netdev;
- cinst->lan_info.pcidev = adapter->pdev;
- cinst->lan_info.fid = 0;
- cinst->lan_info.ftype = IAVF_CLIENT_FTYPE_VF;
- cinst->lan_info.hw_addr = adapter->hw.hw_addr;
- cinst->lan_info.ops = &iavf_lan_ops;
- cinst->lan_info.version.major = IAVF_CLIENT_VERSION_MAJOR;
- cinst->lan_info.version.minor = IAVF_CLIENT_VERSION_MINOR;
- cinst->lan_info.version.build = IAVF_CLIENT_VERSION_BUILD;
- iavf_client_get_params(vsi, &params);
- cinst->lan_info.params = params;
- set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state);
-
- cinst->lan_info.msix_count = adapter->num_rdma_msix;
- cinst->lan_info.msix_entries =
- &adapter->msix_entries[adapter->rdma_base_vector];
-
- mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
- struct netdev_hw_addr, list);
- if (mac)
- ether_addr_copy(cinst->lan_info.lanmac, mac->addr);
- else
- dev_err(&adapter->pdev->dev, "MAC address list is empty!\n");
-
- cinst->client = vf_registered_client;
- adapter->cinst = cinst;
-out:
- return cinst;
-}
-
-/**
- * iavf_client_del_instance - removes a client instance from the list
- * @adapter: pointer to the board struct
- *
- **/
-static
-void iavf_client_del_instance(struct iavf_adapter *adapter)
-{
- kfree(adapter->cinst);
- adapter->cinst = NULL;
-}
-
-/**
- * iavf_client_subtask - client maintenance work
- * @adapter: board private structure
- **/
-void iavf_client_subtask(struct iavf_adapter *adapter)
-{
- struct iavf_client *client = vf_registered_client;
- struct iavf_client_instance *cinst;
- int ret = 0;
-
- if (adapter->state < __IAVF_DOWN)
- return;
-
- /* first check client is registered */
- if (!client)
- return;
-
- /* Add the client instance to the instance list */
- cinst = iavf_client_add_instance(adapter);
- if (!cinst)
- return;
-
- dev_info(&adapter->pdev->dev, "Added instance of Client %s\n",
- client->name);
-
- if (!test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) {
- /* Send an Open request to the client */
-
- if (client->ops && client->ops->open)
- ret = client->ops->open(&cinst->lan_info, client);
- if (!ret)
- set_bit(__IAVF_CLIENT_INSTANCE_OPENED,
- &cinst->state);
- else
- /* remove client instance */
- iavf_client_del_instance(adapter);
- }
-}
-
-/**
- * iavf_lan_add_device - add a lan device struct to the list of lan devices
- * @adapter: pointer to the board struct
- *
- * Returns 0 on success or none 0 on error
- **/
-int iavf_lan_add_device(struct iavf_adapter *adapter)
-{
- struct iavf_device *ldev;
- int ret = 0;
-
- mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &iavf_devices, list) {
- if (ldev->vf == adapter) {
- ret = -EEXIST;
- goto out;
- }
- }
- ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
- if (!ldev) {
- ret = -ENOMEM;
- goto out;
- }
- ldev->vf = adapter;
- INIT_LIST_HEAD(&ldev->list);
- list_add(&ldev->list, &iavf_devices);
- dev_info(&adapter->pdev->dev, "Added LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
- adapter->hw.bus.bus_id, adapter->hw.bus.device,
- adapter->hw.bus.func);
-
- /* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients.
- */
- adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
-
-out:
- mutex_unlock(&iavf_device_mutex);
- return ret;
-}
-
-/**
- * iavf_lan_del_device - removes a lan device from the device list
- * @adapter: pointer to the board struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int iavf_lan_del_device(struct iavf_adapter *adapter)
-{
- struct iavf_device *ldev, *tmp;
- int ret = -ENODEV;
-
- mutex_lock(&iavf_device_mutex);
- list_for_each_entry_safe(ldev, tmp, &iavf_devices, list) {
- if (ldev->vf == adapter) {
- dev_info(&adapter->pdev->dev,
- "Deleted LAN device bus=0x%02x dev=0x%02x func=0x%02x\n",
- adapter->hw.bus.bus_id, adapter->hw.bus.device,
- adapter->hw.bus.func);
- list_del(&ldev->list);
- kfree(ldev);
- ret = 0;
- break;
- }
- }
-
- mutex_unlock(&iavf_device_mutex);
- return ret;
-}
-
-/**
- * iavf_client_release - release client specific resources
- * @client: pointer to the registered client
- *
- **/
-static void iavf_client_release(struct iavf_client *client)
-{
- struct iavf_client_instance *cinst;
- struct iavf_device *ldev;
- struct iavf_adapter *adapter;
-
- mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &iavf_devices, list) {
- adapter = ldev->vf;
- cinst = adapter->cinst;
- if (!cinst)
- continue;
- if (test_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state)) {
- if (client->ops && client->ops->close)
- client->ops->close(&cinst->lan_info, client,
- false);
- iavf_client_release_qvlist(&cinst->lan_info);
- clear_bit(__IAVF_CLIENT_INSTANCE_OPENED, &cinst->state);
-
- dev_warn(&adapter->pdev->dev,
- "Client %s instance closed\n", client->name);
- }
- /* delete the client instance */
- iavf_client_del_instance(adapter);
- dev_info(&adapter->pdev->dev, "Deleted client instance of Client %s\n",
- client->name);
- }
- mutex_unlock(&iavf_device_mutex);
-}
-
-/**
- * iavf_client_prepare - prepare client specific resources
- * @client: pointer to the registered client
- *
- **/
-static void iavf_client_prepare(struct iavf_client *client)
-{
- struct iavf_device *ldev;
- struct iavf_adapter *adapter;
-
- mutex_lock(&iavf_device_mutex);
- list_for_each_entry(ldev, &iavf_devices, list) {
- adapter = ldev->vf;
- /* Signal the watchdog to service the client */
- adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
- }
- mutex_unlock(&iavf_device_mutex);
-}
-
-/**
- * iavf_client_virtchnl_send - send a message to the PF instance
- * @ldev: pointer to L2 context.
- * @client: Client pointer.
- * @msg: pointer to message buffer
- * @len: message length
- *
- * Return 0 on success or < 0 on error
- **/
-static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
- struct iavf_client *client,
- u8 *msg, u16 len)
-{
- struct iavf_adapter *adapter = ldev->vf;
- enum iavf_status err;
-
- if (adapter->aq_required)
- return -EAGAIN;
-
- err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA,
- IAVF_SUCCESS, msg, len, NULL);
- if (err)
- dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n",
- err, adapter->hw.aq.asq_last_status);
-
- return err;
-}
-
-/**
- * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map
- * @ldev: pointer to L2 context.
- * @client: Client pointer.
- * @qvlist_info: queue and vector list
- *
- * Return 0 on success or < 0 on error
- **/
-static int iavf_client_setup_qvlist(struct iavf_info *ldev,
- struct iavf_client *client,
- struct iavf_qvlist_info *qvlist_info)
-{
- struct virtchnl_rdma_qvlist_info *v_qvlist_info;
- struct iavf_adapter *adapter = ldev->vf;
- struct iavf_qv_info *qv_info;
- enum iavf_status err;
- u32 v_idx, i;
- size_t msg_size;
-
- if (adapter->aq_required)
- return -EAGAIN;
-
- /* A quick check on whether the vectors belong to the client */
- for (i = 0; i < qvlist_info->num_vectors; i++) {
- qv_info = &qvlist_info->qv_info[i];
- if (!qv_info)
- continue;
- v_idx = qv_info->v_idx;
- if ((v_idx >=
- (adapter->rdma_base_vector + adapter->num_rdma_msix)) ||
- (v_idx < adapter->rdma_base_vector))
- return -EINVAL;
- }
-
- v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;
- msg_size = virtchnl_struct_size(v_qvlist_info, qv_info,
- v_qvlist_info->num_vectors);
-
- adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);
- err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS,
- (u8 *)v_qvlist_info, msg_size, NULL);
-
- if (err) {
- dev_err(&adapter->pdev->dev,
- "Unable to send RDMA vector config message to PF, error %d, aq status %d\n",
- err, adapter->hw.aq.asq_last_status);
- goto out;
- }
-
- err = -EBUSY;
- for (i = 0; i < 5; i++) {
- msleep(100);
- if (!(adapter->client_pending &
- BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) {
- err = 0;
- break;
- }
- }
-out:
- return err;
-}
-
-/**
- * iavf_register_client - Register a iavf client driver with the L2 driver
- * @client: pointer to the iavf_client struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int iavf_register_client(struct iavf_client *client)
-{
- int ret = 0;
-
- if (!client) {
- ret = -EIO;
- goto out;
- }
-
- if (strlen(client->name) == 0) {
- pr_info("iavf: Failed to register client with no name\n");
- ret = -EIO;
- goto out;
- }
-
- if (vf_registered_client) {
- pr_info("iavf: Client %s has already been registered!\n",
- client->name);
- ret = -EEXIST;
- goto out;
- }
-
- if ((client->version.major != IAVF_CLIENT_VERSION_MAJOR) ||
- (client->version.minor != IAVF_CLIENT_VERSION_MINOR)) {
- pr_info("iavf: Failed to register client %s due to mismatched client interface version\n",
- client->name);
- pr_info("Client is using version: %02d.%02d.%02d while LAN driver supports %s\n",
- client->version.major, client->version.minor,
- client->version.build,
- iavf_client_interface_version_str);
- ret = -EIO;
- goto out;
- }
-
- vf_registered_client = client;
-
- iavf_client_prepare(client);
-
- pr_info("iavf: Registered client %s with return code %d\n",
- client->name, ret);
-out:
- return ret;
-}
-EXPORT_SYMBOL(iavf_register_client);
-
-/**
- * iavf_unregister_client - Unregister a iavf client driver with the L2 driver
- * @client: pointer to the iavf_client struct
- *
- * Returns 0 on success or non-0 on error
- **/
-int iavf_unregister_client(struct iavf_client *client)
-{
- int ret = 0;
-
- /* When a unregister request comes through we would have to send
- * a close for each of the client instances that were opened.
- * client_release function is called to handle this.
- */
- iavf_client_release(client);
-
- if (vf_registered_client != client) {
- pr_info("iavf: Client %s has not been registered\n",
- client->name);
- ret = -ENODEV;
- goto out;
- }
- vf_registered_client = NULL;
- pr_info("iavf: Unregistered client %s\n", client->name);
-out:
- return ret;
-}
-EXPORT_SYMBOL(iavf_unregister_client);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
deleted file mode 100644
index 500269bc0f5b..000000000000
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _IAVF_CLIENT_H_
-#define _IAVF_CLIENT_H_
-
-#define IAVF_CLIENT_STR_LENGTH 10
-
-/* Client interface version should be updated anytime there is a change in the
- * existing APIs or data structures.
- */
-#define IAVF_CLIENT_VERSION_MAJOR 0
-#define IAVF_CLIENT_VERSION_MINOR 01
-#define IAVF_CLIENT_VERSION_BUILD 00
-#define IAVF_CLIENT_VERSION_STR \
- __stringify(IAVF_CLIENT_VERSION_MAJOR) "." \
- __stringify(IAVF_CLIENT_VERSION_MINOR) "." \
- __stringify(IAVF_CLIENT_VERSION_BUILD)
-
-struct iavf_client_version {
- u8 major;
- u8 minor;
- u8 build;
- u8 rsvd;
-};
-
-enum iavf_client_state {
- __IAVF_CLIENT_NULL,
- __IAVF_CLIENT_REGISTERED
-};
-
-enum iavf_client_instance_state {
- __IAVF_CLIENT_INSTANCE_NONE,
- __IAVF_CLIENT_INSTANCE_OPENED,
-};
-
-struct iavf_ops;
-struct iavf_client;
-
-/* HW does not define a type value for AEQ; only for RX/TX and CEQ.
- * In order for us to keep the interface simple, SW will define a
- * unique type value for AEQ.
- */
-#define IAVF_QUEUE_TYPE_PE_AEQ 0x80
-#define IAVF_QUEUE_INVALID_IDX 0xFFFF
-
-struct iavf_qv_info {
- u32 v_idx; /* msix_vector */
- u16 ceq_idx;
- u16 aeq_idx;
- u8 itr_idx;
-};
-
-struct iavf_qvlist_info {
- u32 num_vectors;
- struct iavf_qv_info qv_info[];
-};
-
-#define IAVF_CLIENT_MSIX_ALL 0xFFFFFFFF
-
-/* set of LAN parameters useful for clients managed by LAN */
-
-/* Struct to hold per priority info */
-struct iavf_prio_qos_params {
- u16 qs_handle; /* qs handle for prio */
- u8 tc; /* TC mapped to prio */
- u8 reserved;
-};
-
-#define IAVF_CLIENT_MAX_USER_PRIORITY 8
-/* Struct to hold Client QoS */
-struct iavf_qos_params {
- struct iavf_prio_qos_params prio_qos[IAVF_CLIENT_MAX_USER_PRIORITY];
-};
-
-struct iavf_params {
- struct iavf_qos_params qos;
- u16 mtu;
- u16 link_up; /* boolean */
-};
-
-/* Structure to hold LAN device info for a client device */
-struct iavf_info {
- struct iavf_client_version version;
- u8 lanmac[6];
- struct net_device *netdev;
- struct pci_dev *pcidev;
- u8 __iomem *hw_addr;
- u8 fid; /* function id, PF id or VF id */
-#define IAVF_CLIENT_FTYPE_PF 0
-#define IAVF_CLIENT_FTYPE_VF 1
- u8 ftype; /* function type, PF or VF */
- void *vf; /* cast to iavf_adapter */
-
- /* All L2 params that could change during the life span of the device
- * and needs to be communicated to the client when they change
- */
- struct iavf_params params;
- struct iavf_ops *ops;
-
- u16 msix_count; /* number of msix vectors*/
- /* Array down below will be dynamically allocated based on msix_count */
- struct msix_entry *msix_entries;
- u16 itr_index; /* Which ITR index the PE driver is suppose to use */
-};
-
-struct iavf_ops {
- /* setup_q_vector_list enables queues with a particular vector */
- int (*setup_qvlist)(struct iavf_info *ldev, struct iavf_client *client,
- struct iavf_qvlist_info *qv_info);
-
- u32 (*virtchnl_send)(struct iavf_info *ldev, struct iavf_client *client,
- u8 *msg, u16 len);
-
- /* If the PE Engine is unresponsive, RDMA driver can request a reset.*/
- void (*request_reset)(struct iavf_info *ldev,
- struct iavf_client *client);
-};
-
-struct iavf_client_ops {
- /* Should be called from register_client() or whenever the driver is
- * ready to create a specific client instance.
- */
- int (*open)(struct iavf_info *ldev, struct iavf_client *client);
-
- /* Should be closed when netdev is unavailable or when unregister
- * call comes in. If the close happens due to a reset, set the reset
- * bit to true.
- */
- void (*close)(struct iavf_info *ldev, struct iavf_client *client,
- bool reset);
-
- /* called when some l2 managed parameters changes - mss */
- void (*l2_param_change)(struct iavf_info *ldev,
- struct iavf_client *client,
- struct iavf_params *params);
-
- /* called when a message is received from the PF */
- int (*virtchnl_receive)(struct iavf_info *ldev,
- struct iavf_client *client,
- u8 *msg, u16 len);
-};
-
-/* Client device */
-struct iavf_client_instance {
- struct list_head list;
- struct iavf_info lan_info;
- struct iavf_client *client;
- unsigned long state;
-};
-
-struct iavf_client {
- struct list_head list; /* list of registered clients */
- char name[IAVF_CLIENT_STR_LENGTH];
- struct iavf_client_version version;
- unsigned long state; /* client state */
- atomic_t ref_cnt; /* Count of all the client devices of this kind */
- u32 flags;
-#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
-#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
- u8 type;
-#define IAVF_CLIENT_RDMA 0
- struct iavf_client_ops *ops; /* client ops provided by the client */
-};
-
-/* used by clients */
-int iavf_register_client(struct iavf_client *client);
-int iavf_unregister_client(struct iavf_client *client);
-#endif /* _IAVF_CLIENT_H_ */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 1afd761d8052..8091e6feca01 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -7,38 +7,6 @@
#include <linux/avf/virtchnl.h>
/**
- * iavf_set_mac_type - Sets MAC type
- * @hw: pointer to the HW structure
- *
- * This function sets the mac type of the adapter based on the
- * vendor ID and device ID stored in the hw structure.
- **/
-enum iavf_status iavf_set_mac_type(struct iavf_hw *hw)
-{
- enum iavf_status status = 0;
-
- if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
- switch (hw->device_id) {
- case IAVF_DEV_ID_X722_VF:
- hw->mac.type = IAVF_MAC_X722_VF;
- break;
- case IAVF_DEV_ID_VF:
- case IAVF_DEV_ID_VF_HV:
- case IAVF_DEV_ID_ADAPTIVE_VF:
- hw->mac.type = IAVF_MAC_VF;
- break;
- default:
- hw->mac.type = IAVF_MAC_GENERIC;
- break;
- }
- } else {
- status = IAVF_ERR_DEVICE_NOT_SUPPORTED;
- }
-
- return status;
-}
-
-/**
* iavf_aq_str - convert AQ err code to a string
* @hw: pointer to the HW structure
* @aq_err: the AQ error code to convert
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 90397293525f..6f236d1a6444 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -395,11 +395,9 @@ static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
{
unsigned int i;
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- snprintf(data, ETH_GSTRING_LEN, "%s",
- iavf_gstrings_priv_flags[i].flag_string);
- data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
+ ethtool_sprintf(&data, "%s",
+ iavf_gstrings_priv_flags[i].flag_string);
}
/**
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index b3434dbc90d6..c862ebcd2e39 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3,7 +3,6 @@
#include "iavf.h"
#include "iavf_prototype.h"
-#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
* be included exactly once across the whole kernel with
* CREATE_TRACE_POINTS defined
@@ -1187,6 +1186,16 @@ static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
}
/**
+ * iavf_promiscuous_mode_changed - check if promiscuous mode bits changed
+ * @adapter: device specific adapter
+ */
+bool iavf_promiscuous_mode_changed(struct iavf_adapter *adapter)
+{
+ return (adapter->current_netdev_promisc_flags ^ adapter->netdev->flags) &
+ (IFF_PROMISC | IFF_ALLMULTI);
+}
+
+/**
* iavf_set_rx_mode - NDO callback to set the netdev filters
* @netdev: network interface device structure
**/
@@ -1199,19 +1208,10 @@ static void iavf_set_rx_mode(struct net_device *netdev)
__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- if (netdev->flags & IFF_PROMISC &&
- !(adapter->flags & IAVF_FLAG_PROMISC_ON))
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
- else if (!(netdev->flags & IFF_PROMISC) &&
- adapter->flags & IAVF_FLAG_PROMISC_ON)
- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;
-
- if (netdev->flags & IFF_ALLMULTI &&
- !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- else if (!(netdev->flags & IFF_ALLMULTI) &&
- adapter->flags & IAVF_FLAG_ALLMULTI_ON)
- adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
+ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
+ if (iavf_promiscuous_mode_changed(adapter))
+ adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
}
/**
@@ -1275,7 +1275,7 @@ static void iavf_configure(struct iavf_adapter *adapter)
* iavf_up_complete - Finish the last steps of bringing up a connection
* @adapter: board private structure
*
- * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
+ * Expects to be called while holding crit_lock.
**/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
@@ -1285,8 +1285,6 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
iavf_napi_enable_all(adapter);
adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
- if (CLIENT_ENABLED(adapter))
- adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@@ -1399,7 +1397,7 @@ static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
*
- * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
+ * Expects to be called while holding crit_lock.
**/
void iavf_down(struct iavf_adapter *adapter)
{
@@ -1419,8 +1417,10 @@ void iavf_down(struct iavf_adapter *adapter)
iavf_clear_fdir_filters(adapter);
iavf_clear_adv_rss_conf(adapter);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
- !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
+ if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
+ return;
+
+ if (!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
@@ -1952,6 +1952,17 @@ err_alloc_queues:
}
/**
+ * iavf_free_interrupt_scheme - Undo what iavf_init_interrupt_scheme does
+ * @adapter: board private structure
+ **/
+static void iavf_free_interrupt_scheme(struct iavf_adapter *adapter)
+{
+ iavf_free_q_vectors(adapter);
+ iavf_reset_interrupt_capability(adapter);
+ iavf_free_queues(adapter);
+}
+
+/**
* iavf_free_rss - Free memory used by RSS structs
* @adapter: board private structure
**/
@@ -1979,11 +1990,9 @@ static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter, bool runni
if (running)
iavf_free_traffic_irqs(adapter);
iavf_free_misc_irq(adapter);
- iavf_reset_interrupt_capability(adapter);
- iavf_free_q_vectors(adapter);
- iavf_free_queues(adapter);
+ iavf_free_interrupt_scheme(adapter);
- err = iavf_init_interrupt_scheme(adapter);
+ err = iavf_init_interrupt_scheme(adapter);
if (err)
goto err;
@@ -2018,7 +2027,7 @@ static void iavf_finish_config(struct work_struct *work)
mutex_lock(&adapter->crit_lock);
if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) &&
- adapter->netdev_registered &&
+ adapter->netdev->reg_state == NETREG_REGISTERED &&
!test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) {
netdev_update_features(adapter->netdev);
adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES;
@@ -2026,7 +2035,7 @@ static void iavf_finish_config(struct work_struct *work)
switch (adapter->state) {
case __IAVF_DOWN:
- if (!adapter->netdev_registered) {
+ if (adapter->netdev->reg_state != NETREG_REGISTERED) {
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
@@ -2040,7 +2049,6 @@ static void iavf_finish_config(struct work_struct *work)
__IAVF_INIT_CONFIG_ADAPTER);
goto out;
}
- adapter->netdev_registered = true;
}
/* Set the real number of queues when reset occurs while
@@ -2162,19 +2170,8 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
return 0;
}
- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
- iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
- FLAG_VF_MULTICAST_PROMISC);
- return 0;
- }
-
- if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
- iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
- return 0;
- }
- if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
- (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
- iavf_set_promiscuous(adapter, 0);
+ if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE) {
+ iavf_set_promiscuous(adapter);
return 0;
}
@@ -2366,11 +2363,6 @@ static void iavf_startup(struct iavf_adapter *adapter)
/* driver loaded, probe complete */
adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- status = iavf_set_mac_type(hw);
- if (status) {
- dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", status);
- goto err;
- }
ret = iavf_check_reset_complete(hw);
if (ret) {
@@ -2711,12 +2703,6 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
adapter->link_up = false;
netif_tx_stop_all_queues(netdev);
- if (CLIENT_ALLOWED(adapter)) {
- err = iavf_lan_add_device(adapter);
- if (err)
- dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
- err);
- }
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
@@ -2913,7 +2899,6 @@ static void iavf_watchdog_task(struct work_struct *work)
return;
}
- schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
if (adapter->state >= __IAVF_DOWN)
@@ -2982,9 +2967,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
spin_unlock_bh(&adapter->cloud_filter_list_lock);
iavf_free_misc_irq(adapter);
- iavf_reset_interrupt_capability(adapter);
- iavf_free_q_vectors(adapter);
- iavf_free_queues(adapter);
+ iavf_free_interrupt_scheme(adapter);
memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
iavf_shutdown_adminq(&adapter->hw);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
@@ -3026,16 +3009,6 @@ static void iavf_reset_task(struct work_struct *work)
return;
}
- while (!mutex_trylock(&adapter->client_lock))
- usleep_range(500, 1000);
- if (CLIENT_ENABLED(adapter)) {
- adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
- IAVF_FLAG_CLIENT_NEEDS_CLOSE |
- IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
- IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
- cancel_delayed_work_sync(&adapter->client_task);
- iavf_notify_client_close(&adapter->vsi, true);
- }
iavf_misc_irq_disable(adapter);
if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
@@ -3079,7 +3052,6 @@ static void iavf_reset_task(struct work_struct *work)
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
reg_val);
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
return; /* Do not attempt to reinit. It's dead, Jim. */
}
@@ -3218,7 +3190,6 @@ continue_reset:
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
wake_up(&adapter->reset_waitqueue);
- mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
return;
@@ -3229,7 +3200,6 @@ reset_err:
}
iavf_disable_vf(adapter);
- mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
}
@@ -3329,48 +3299,6 @@ out:
}
/**
- * iavf_client_task - worker thread to perform client work
- * @work: pointer to work_struct containing our data
- *
- * This task handles client interactions. Because client calls can be
- * reentrant, we can't handle them in the watchdog.
- **/
-static void iavf_client_task(struct work_struct *work)
-{
- struct iavf_adapter *adapter =
- container_of(work, struct iavf_adapter, client_task.work);
-
- /* If we can't get the client bit, just give up. We'll be rescheduled
- * later.
- */
-
- if (!mutex_trylock(&adapter->client_lock))
- return;
-
- if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
- iavf_client_subtask(adapter);
- adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
- goto out;
- }
- if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- iavf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
- goto out;
- }
- if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
- iavf_notify_client_close(&adapter->vsi, false);
- adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
- goto out;
- }
- if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
- iavf_notify_client_open(&adapter->vsi);
- adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
- }
-out:
- mutex_unlock(&adapter->client_lock);
-}
-
-/**
* iavf_free_all_tx_resources - Free Tx Resources for All Queues
* @adapter: board private structure
*
@@ -4302,8 +4230,6 @@ static int iavf_close(struct net_device *netdev)
}
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
- if (CLIENT_ENABLED(adapter))
- adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
* IAVF_FLAG_AQ_DISABLE_QUEUES because in such case there is rtnl
* deadlock with adminq_task() until iavf_close timeouts. We must send
@@ -4372,10 +4298,6 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
- if (CLIENT_ENABLED(adapter)) {
- iavf_notify_client_l2_params(&adapter->vsi);
- adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
- }
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
@@ -4410,6 +4332,9 @@ static int iavf_set_features(struct net_device *netdev,
(features & NETIF_VLAN_OFFLOAD_FEATURES))
iavf_set_vlan_offload_features(adapter, netdev->features,
features);
+ if (CRC_OFFLOAD_ALLOWED(adapter) &&
+ ((netdev->features & NETIF_F_RXFCS) ^ (features & NETIF_F_RXFCS)))
+ iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
return 0;
}
@@ -4531,6 +4456,9 @@ iavf_get_netdev_vlan_hw_features(struct iavf_adapter *adapter)
}
}
+ if (CRC_OFFLOAD_ALLOWED(adapter))
+ hw_features |= NETIF_F_RXFCS;
+
return hw_features;
}
@@ -4695,6 +4623,55 @@ iavf_fix_netdev_vlan_features(struct iavf_adapter *adapter,
}
/**
+ * iavf_fix_strip_features - fix NETDEV CRC and VLAN strip features
+ * @adapter: board private structure
+ * @requested_features: stack requested NETDEV features
+ *
+ * Returns fixed-up features bits
+ **/
+static netdev_features_t
+iavf_fix_strip_features(struct iavf_adapter *adapter,
+ netdev_features_t requested_features)
+{
+ struct net_device *netdev = adapter->netdev;
+ bool crc_offload_req, is_vlan_strip;
+ netdev_features_t vlan_strip;
+ int num_non_zero_vlan;
+
+ crc_offload_req = CRC_OFFLOAD_ALLOWED(adapter) &&
+ (requested_features & NETIF_F_RXFCS);
+ num_non_zero_vlan = iavf_get_num_vlans_added(adapter);
+ vlan_strip = (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX);
+ is_vlan_strip = requested_features & vlan_strip;
+
+ if (!crc_offload_req)
+ return requested_features;
+
+ if (!num_non_zero_vlan && (netdev->features & vlan_strip) &&
+ !(netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
+ requested_features &= ~vlan_strip;
+ netdev_info(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ return requested_features;
+ }
+
+ if ((netdev->features & NETIF_F_RXFCS) && is_vlan_strip) {
+ requested_features &= ~vlan_strip;
+ if (!(netdev->features & vlan_strip))
+ netdev_info(netdev, "To enable VLAN stripping, first need to enable FCS/CRC stripping");
+
+ return requested_features;
+ }
+
+ if (num_non_zero_vlan && is_vlan_strip &&
+ !(netdev->features & NETIF_F_RXFCS)) {
+ requested_features &= ~NETIF_F_RXFCS;
+ netdev_info(netdev, "To disable FCS/CRC stripping, first need to disable VLAN stripping");
+ }
+
+ return requested_features;
+}
+
+/**
* iavf_fix_features - fix up the netdev feature bits
* @netdev: our net device
* @features: desired feature bits
@@ -4706,7 +4683,9 @@ static netdev_features_t iavf_fix_features(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- return iavf_fix_netdev_vlan_features(adapter, features);
+ features = iavf_fix_netdev_vlan_features(adapter, features);
+
+ return iavf_fix_strip_features(adapter, features);
}
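
As a sketch of what iavf_fix_strip_features() above guarantees (not part of this patch; the helper name is invented): when CRC offload is allowed, the VF cannot deliver frames that both keep the FCS (NETIF_F_RXFCS set) and have their VLAN tag stripped, so the fix-up clears one of the two conflicting request bits and logs which one was dropped.

/* Illustration only: the invariant iavf_fix_strip_features() enforces.
 * keep_fcs models NETIF_F_RXFCS, vlan_strip models the CTAG/STAG RX
 * stripping feature bits.
 */
static bool iavf_strip_features_compatible(bool keep_fcs, bool vlan_strip)
{
	/* FCS retention and VLAN stripping are mutually exclusive on the VF */
	return !(keep_fcs && vlan_strip);
}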
static const struct net_device_ops iavf_netdev_ops = {
@@ -4743,7 +4722,7 @@ static int iavf_check_reset_complete(struct iavf_hw *hw)
if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
(rstat == VIRTCHNL_VFR_COMPLETED))
return 0;
- usleep_range(10, 20);
+ msleep(IAVF_RESET_WAIT_MS);
}
return -EBUSY;
}
@@ -4962,7 +4941,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* and destroy them only once in remove
*/
mutex_init(&adapter->crit_lock);
- mutex_init(&adapter->client_lock);
mutex_init(&hw->aq.asq_mutex);
mutex_init(&hw->aq.arq_mutex);
@@ -4970,6 +4948,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->cloud_filter_list_lock);
spin_lock_init(&adapter->fdir_fltr_lock);
spin_lock_init(&adapter->adv_rss_lock);
+ spin_lock_init(&adapter->current_netdev_promisc_flags_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -4981,7 +4960,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_WORK(&adapter->finish_config, iavf_finish_config);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
- INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
/* Setup the wait queue for indicating transition to down status */
init_waitqueue_head(&adapter->down_waitqueue);
@@ -5022,8 +5000,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
netif_device_detach(netdev);
- while (!mutex_trylock(&adapter->crit_lock))
- usleep_range(500, 1000);
+ mutex_lock(&adapter->crit_lock);
if (netif_running(netdev)) {
rtnl_lock();
@@ -5094,7 +5071,6 @@ static void iavf_remove(struct pci_dev *pdev)
struct iavf_mac_filter *f, *ftmp;
struct net_device *netdev;
struct iavf_hw *hw;
- int err;
netdev = adapter->netdev;
hw = &adapter->hw;
@@ -5125,19 +5101,8 @@ static void iavf_remove(struct pci_dev *pdev)
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->finish_config);
- rtnl_lock();
- if (adapter->netdev_registered) {
- unregister_netdevice(netdev);
- adapter->netdev_registered = false;
- }
- rtnl_unlock();
-
- if (CLIENT_ALLOWED(adapter)) {
- err = iavf_lan_del_device(adapter);
- if (err)
- dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
- err);
- }
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
mutex_lock(&adapter->crit_lock);
dev_info(&adapter->pdev->dev, "Removing device\n");
@@ -5156,7 +5121,6 @@ static void iavf_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->adminq_task);
- cancel_delayed_work_sync(&adapter->client_task);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
@@ -5164,9 +5128,7 @@ static void iavf_remove(struct pci_dev *pdev)
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
iavf_free_misc_irq(adapter);
-
- iavf_reset_interrupt_capability(adapter);
- iavf_free_q_vectors(adapter);
+ iavf_free_interrupt_scheme(adapter);
iavf_free_rss(adapter);
@@ -5176,13 +5138,11 @@ static void iavf_remove(struct pci_dev *pdev)
/* destroy the locks only once, here */
mutex_destroy(&hw->aq.arq_mutex);
mutex_destroy(&hw->aq.asq_mutex);
- mutex_destroy(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
mutex_destroy(&adapter->crit_lock);
iounmap(hw->hw_addr);
pci_release_regions(pdev);
- iavf_free_queues(adapter);
kfree(adapter->vf_res);
spin_lock_bh(&adapter->mac_vlan_list_lock);
/* If we got removed before an up/down sequence, we've got a filter
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index 940cb4203fbe..4a48e6171405 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -45,8 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
-enum iavf_status iavf_set_mac_type(struct iavf_hw *hw);
-
extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 8c5f6096b002..d64c4997136b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -7,8 +7,8 @@
#include "iavf_trace.h"
#include "iavf_prototype.h"
-static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
- u32 td_tag)
+static __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
+ u32 td_tag)
{
return cpu_to_le64(IAVF_TX_DESC_DTYPE_DATA |
((u64)td_cmd << IAVF_TXD_QW1_CMD_SHIFT) |
@@ -370,8 +370,8 @@ static void iavf_enable_wb_on_itr(struct iavf_vsi *vsi,
q_vector->arm_wb_state = true;
}
-static inline bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
- struct iavf_ring_container *rc)
+static bool iavf_container_is_rx(struct iavf_q_vector *q_vector,
+ struct iavf_ring_container *rc)
{
return &q_vector->rx == rc;
}
@@ -806,7 +806,7 @@ err:
* @rx_ring: ring to bump
* @val: new head index
**/
-static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
+static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
@@ -828,7 +828,7 @@ static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
*
* Returns the offset value for ring into the data buffer.
*/
-static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
+static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
}
@@ -977,9 +977,9 @@ no_buffers:
* @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
**/
-static inline void iavf_rx_checksum(struct iavf_vsi *vsi,
- struct sk_buff *skb,
- union iavf_rx_desc *rx_desc)
+static void iavf_rx_checksum(struct iavf_vsi *vsi,
+ struct sk_buff *skb,
+ union iavf_rx_desc *rx_desc)
{
struct iavf_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
@@ -1061,7 +1061,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline int iavf_ptype_to_htype(u8 ptype)
+static int iavf_ptype_to_htype(u8 ptype)
{
struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1085,10 +1085,10 @@ static inline int iavf_ptype_to_htype(u8 ptype)
* @skb: skb currently being received and modified
* @rx_ptype: Rx packet type
**/
-static inline void iavf_rx_hash(struct iavf_ring *ring,
- union iavf_rx_desc *rx_desc,
- struct sk_buff *skb,
- u8 rx_ptype)
+static void iavf_rx_hash(struct iavf_ring *ring,
+ union iavf_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u8 rx_ptype)
{
u32 hash;
const __le64 rss_mask =
@@ -1115,10 +1115,10 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
**/
-static inline
-void iavf_process_skb_fields(struct iavf_ring *rx_ring,
- union iavf_rx_desc *rx_desc, struct sk_buff *skb,
- u8 rx_ptype)
+static void
+iavf_process_skb_fields(struct iavf_ring *rx_ring,
+ union iavf_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
@@ -1662,8 +1662,8 @@ static inline u32 iavf_buildreg_itr(const int type, u16 itr)
* @q_vector: q_vector for which itr is being updated and interrupt enabled
*
**/
-static inline void iavf_update_enable_itr(struct iavf_vsi *vsi,
- struct iavf_q_vector *q_vector)
+static void iavf_update_enable_itr(struct iavf_vsi *vsi,
+ struct iavf_q_vector *q_vector)
{
struct iavf_hw *hw = &vsi->back->hw;
u32 intval;
@@ -2275,9 +2275,9 @@ int __iavf_maybe_stop_tx(struct iavf_ring *tx_ring, int size)
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
- struct iavf_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
+ struct iavf_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 9f1f523807c4..2b6a207fa441 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -69,15 +69,6 @@ enum iavf_debug_mask {
* the Firmware and AdminQ are intended to insulate the driver from most of the
* future changes, but these structures will also do part of the job.
*/
-enum iavf_mac_type {
- IAVF_MAC_UNKNOWN = 0,
- IAVF_MAC_XL710,
- IAVF_MAC_VF,
- IAVF_MAC_X722,
- IAVF_MAC_X722_VF,
- IAVF_MAC_GENERIC,
-};
-
enum iavf_vsi_type {
IAVF_VSI_MAIN = 0,
IAVF_VSI_VMDQ1 = 1,
@@ -110,11 +101,8 @@ struct iavf_hw_capabilities {
};
struct iavf_mac_info {
- enum iavf_mac_type type;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
- u8 san_addr[ETH_ALEN];
- u16 max_fcoeq;
};
/* PCI bus types */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index f9727e9c3d63..64c4443dbef9 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -3,7 +3,6 @@
#include "iavf.h"
#include "iavf_prototype.h"
-#include "iavf_client.h"
/**
* iavf_send_pf_msg
@@ -142,6 +141,7 @@ int iavf_send_vf_config_msg(struct iavf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_VLAN_V2 |
+ VIRTCHNL_VF_OFFLOAD_CRC |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
VIRTCHNL_VF_OFFLOAD_ADQ |
@@ -312,6 +312,9 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.databuffer_size =
ALIGN(adapter->rx_rings[i].rx_buf_len,
BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
+ if (CRC_OFFLOAD_ALLOWED(adapter))
+ vqpi->rxq.crc_disable = !!(adapter->netdev->features &
+ NETIF_F_RXFCS);
vqpi++;
}
@@ -936,14 +939,14 @@ void iavf_del_vlans(struct iavf_adapter *adapter)
/**
* iavf_set_promiscuous
* @adapter: adapter structure
- * @flags: bitmask to control unicast/multicast promiscuous.
*
* Request that the PF enable promiscuous mode for our VSI.
**/
-void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
+void iavf_set_promiscuous(struct iavf_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct virtchnl_promisc_info vpi;
- int promisc_all;
+ unsigned int flags;
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -952,36 +955,57 @@ void iavf_set_promiscuous(struct iavf_adapter *adapter, int flags)
return;
}
- promisc_all = FLAG_VF_UNICAST_PROMISC |
- FLAG_VF_MULTICAST_PROMISC;
- if ((flags & promisc_all) == promisc_all) {
- adapter->flags |= IAVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_PROMISC;
- dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
- }
+ /* prevent changes to promiscuous flags */
+ spin_lock_bh(&adapter->current_netdev_promisc_flags_lock);
- if (flags & FLAG_VF_MULTICAST_PROMISC) {
- adapter->flags |= IAVF_FLAG_ALLMULTI_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_REQUEST_ALLMULTI;
- dev_info(&adapter->pdev->dev, "%s is entering multicast promiscuous mode\n",
- adapter->netdev->name);
+ /* sanity check to prevent duplicate AQ calls */
+ if (!iavf_promiscuous_mode_changed(adapter)) {
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+ dev_dbg(&adapter->pdev->dev, "No change in promiscuous mode\n");
+ /* allow changes to promiscuous flags */
+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+ return;
}
- if (!flags) {
- if (adapter->flags & IAVF_FLAG_PROMISC_ON) {
- adapter->flags &= ~IAVF_FLAG_PROMISC_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_PROMISC;
- dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
- }
+ /* there are 2 bits, but only 3 states */
+ if (!(netdev->flags & IFF_PROMISC) &&
+ netdev->flags & IFF_ALLMULTI) {
+ /* State 1 - only multicast promiscuous mode enabled
+ * - !IFF_PROMISC && IFF_ALLMULTI
+ */
+ flags = FLAG_VF_MULTICAST_PROMISC;
+ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
+ adapter->current_netdev_promisc_flags &= ~IFF_PROMISC;
+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ } else if (!(netdev->flags & IFF_PROMISC) &&
+ !(netdev->flags & IFF_ALLMULTI)) {
+ /* State 2 - unicast/multicast promiscuous mode disabled
+ * - !IFF_PROMISC && !IFF_ALLMULTI
+ */
+ flags = 0;
+ adapter->current_netdev_promisc_flags &=
+ ~(IFF_PROMISC | IFF_ALLMULTI);
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ } else {
+ /* State 3 - unicast/multicast promiscuous mode enabled
+ * - IFF_PROMISC && IFF_ALLMULTI
+ * - IFF_PROMISC && !IFF_ALLMULTI
+ */
+ flags = FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
+ adapter->current_netdev_promisc_flags |= IFF_PROMISC;
+ if (netdev->flags & IFF_ALLMULTI)
+ adapter->current_netdev_promisc_flags |= IFF_ALLMULTI;
+ else
+ adapter->current_netdev_promisc_flags &= ~IFF_ALLMULTI;
- if (adapter->flags & IAVF_FLAG_ALLMULTI_ON) {
- adapter->flags &= ~IAVF_FLAG_ALLMULTI_ON;
- adapter->aq_required &= ~IAVF_FLAG_AQ_RELEASE_ALLMULTI;
- dev_info(&adapter->pdev->dev, "%s is leaving multicast promiscuous mode\n",
- adapter->netdev->name);
- }
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
}
+ adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_PROMISC_MODE;
+
+ /* allow changes to promiscuous flags */
+ spin_unlock_bh(&adapter->current_netdev_promisc_flags_lock);
+
adapter->current_op = VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
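
The rewritten iavf_set_promiscuous() derives the virtchnl flags entirely from the current netdev flags; because IFF_PROMISC implies both unicast and multicast promiscuity, the two bits collapse into the three states handled above. A condensed sketch of that mapping (the helper is invented for illustration and is not part of this patch):

/* Illustration only: the three states IFF_PROMISC/IFF_ALLMULTI can map to */
static unsigned int promisc_flags_from_netdev(unsigned int netdev_flags)
{
	if (netdev_flags & IFF_PROMISC)		/* state 3: full promiscuous */
		return FLAG_VF_UNICAST_PROMISC | FLAG_VF_MULTICAST_PROMISC;
	if (netdev_flags & IFF_ALLMULTI)	/* state 1: multicast promiscuous only */
		return FLAG_VF_MULTICAST_PROMISC;
	return 0;				/* state 2: promiscuous off */
}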
@@ -1353,8 +1377,6 @@ void iavf_disable_vlan_insertion_v2(struct iavf_adapter *adapter, u16 tpid)
VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2);
}
-#define IAVF_MAX_SPEED_STRLEN 13
-
/**
* iavf_print_link_message - print link up or down
* @adapter: adapter structure
@@ -1372,10 +1394,6 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
return;
}
- speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
- if (!speed)
- return;
-
if (ADV_LINK_SUPPORT(adapter)) {
link_speed_mbps = adapter->link_speed_mbps;
goto print_link_msg;
@@ -1413,17 +1431,17 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
print_link_msg:
if (link_speed_mbps > SPEED_1000) {
- if (link_speed_mbps == SPEED_2500)
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "2.5 Gbps");
- else
+ if (link_speed_mbps == SPEED_2500) {
+ speed = kasprintf(GFP_KERNEL, "%s", "2.5 Gbps");
+ } else {
/* convert to Gbps inline */
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
- link_speed_mbps / 1000, "Gbps");
+ speed = kasprintf(GFP_KERNEL, "%d Gbps",
+ link_speed_mbps / 1000);
+ }
} else if (link_speed_mbps == SPEED_UNKNOWN) {
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%s", "Unknown Mbps");
+ speed = kasprintf(GFP_KERNEL, "%s", "Unknown Mbps");
} else {
- snprintf(speed, IAVF_MAX_SPEED_STRLEN, "%d %s",
- link_speed_mbps, "Mbps");
+ speed = kasprintf(GFP_KERNEL, "%d Mbps", link_speed_mbps);
}
netdev_info(netdev, "NIC Link is Up Speed is %s Full Duplex\n", speed);
@@ -2290,19 +2308,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_RDMA:
- /* Gobble zero-length replies from the PF. They indicate that
- * a previous message was received OK, and the client doesn't
- * care about that.
- */
- if (msglen && CLIENT_ENABLED(adapter))
- iavf_notify_client_message(&adapter->vsi, msg, msglen);
- break;
-
- case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
- adapter->client_pending &=
- ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP));
- break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 960277d78e09..0679907980f7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -43,7 +43,7 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 5022b036ca4f..351e0d36df44 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,7 @@
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
+#include "ice_dpll.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -195,10 +196,13 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
+
enum ice_feature {
ICE_F_DSCP,
- ICE_F_PTP_EXTTS,
+ ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
+ ICE_F_CGU,
ICE_F_GNSS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
@@ -508,6 +512,7 @@ enum ice_pf_flags {
ICE_FLAG_UNPLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
+ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -549,6 +554,8 @@ struct ice_pf {
* MSIX vectors allowed on this PF.
*/
u16 sriov_base_vector;
+ unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
+ u16 sriov_irq_size; /* size of the irq_bm bitmap */
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -640,6 +647,7 @@ struct ice_pf {
#define ICE_VF_AGG_NODE_ID_START 65
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
+ struct ice_dplls dplls;
};
extern struct workqueue_struct *ice_lag_wq;
@@ -668,6 +676,18 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
}
/**
+ * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
+ * @pf: Board private structure
+ *
+ * Return true if this PF should respond to the Tx timestamp interrupt
+ * indication in the miscellaneous OICR interrupt handler.
+ */
+static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
+{
+ return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
@@ -942,6 +962,7 @@ int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
+void ice_adv_lnk_speed_maps_init(void);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 29f7a9852aec..d7fdb7ba7268 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1099,7 +1099,15 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
+#define ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 BIT_ULL(5)
+#define ICE_PHY_TYPE_HIGH_200G_SR4 BIT_ULL(6)
+#define ICE_PHY_TYPE_HIGH_200G_FR4 BIT_ULL(7)
+#define ICE_PHY_TYPE_HIGH_200G_LR4 BIT_ULL(8)
+#define ICE_PHY_TYPE_HIGH_200G_DR4 BIT_ULL(9)
+#define ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 BIT_ULL(10)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC BIT_ULL(11)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4 BIT_ULL(12)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 12
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1319,11 +1327,41 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
#define ICE_AQ_LINK_SPEED_50GB BIT(9)
#define ICE_AQ_LINK_SPEED_100GB BIT(10)
+#define ICE_AQ_LINK_SPEED_200GB BIT(11)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
- __le32 reserved3; /* Aligns next field to 8-byte boundary */
- __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
-};
+ /* Aligns next field to 8-byte boundary */
+ __le16 reserved3;
+ u8 ext_fec_status;
+ /* RS 272 FEC enabled */
+#define ICE_AQ_LINK_RS_272_FEC_EN BIT(0)
+ u8 reserved4;
+ /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_low;
+ /* Use values from ICE_PHY_TYPE_HIGH_* */
+ __le64 phy_type_high;
+#define ICE_AQC_LS_DATA_SIZE_V1 \
+ offsetofend(struct ice_aqc_get_link_status_data, phy_type_high)
+ /* Get link status v2 link partner data */
+ __le64 lp_phy_type_low;
+ __le64 lp_phy_type_high;
+ u8 lp_fec_adv;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define ICE_AQ_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define ICE_AQ_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define ICE_AQ_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define ICE_AQ_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
+#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+#define ICE_AQC_LS_DATA_SIZE_V2 \
+ offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol)
+} __packed;
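
ICE_AQC_LS_DATA_SIZE_V1 and ICE_AQC_LS_DATA_SIZE_V2 above are simply offsetofend() over the same struct, so the V1 length is the prefix of ice_aqc_get_link_status_data up to and including phy_type_high, while V2 also covers the new link-partner fields; ice_get_link_status_datalen() further down picks between them per MAC family. A tiny hedged sketch of how offsetofend() yields such prefix lengths, using an invented struct:

/* Illustration only: offsetofend(TYPE, MEMBER) is offsetof() plus the
 * member's size, i.e. the length of the struct prefix ending at MEMBER.
 */
struct demo_link_resp {
	__le64 phy_type_low;	/* bytes 0..7  */
	__le64 phy_type_high;	/* bytes 8..15 */
	u8 lp_fec_adv;		/* byte 16, "v2-only" trailing field */
	u8 lp_flowcontrol;	/* byte 17 */
};

#define DEMO_LS_SIZE_V1 offsetofend(struct demo_link_resp, phy_type_high)   /* 16 */
#define DEMO_LS_SIZE_V2 offsetofend(struct demo_link_resp, lp_flowcontrol)  /* 18 */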
/* Set event mask command (direct 0x0613) */
struct ice_aqc_set_event_mask {
@@ -1351,6 +1389,30 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
+/* Set PHY recovered clock output (direct 0x0630) */
+struct ice_aqc_set_phy_rec_clk_out {
+ u8 phy_output;
+ u8 port_num;
+#define ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT 0xFF
+ u8 flags;
+#define ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN BIT(0)
+ u8 rsvd;
+ __le32 freq;
+ u8 rsvd2[6];
+ __le16 node_handle;
+};
+
+/* Get PHY recovered clock output (direct 0x0631) */
+struct ice_aqc_get_phy_rec_clk_out {
+ u8 phy_output;
+ u8 port_num;
+#define ICE_AQC_GET_PHY_REC_CLK_OUT_CURR_PORT 0xFF
+ u8 flags;
+#define ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN BIT(0)
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -1367,6 +1429,9 @@ struct ice_aqc_link_topo_params {
#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11
#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
(0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
@@ -1403,8 +1468,13 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
-#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
-#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 0x25
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY 0x30
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
u8 rsvd[9];
};
@@ -2125,6 +2195,193 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ice_aqc_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 eec_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+/* Set CGU input config (direct 0x0C62) */
+struct ice_aqc_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ice_aqc_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+/* Set CGU output config (direct 0x0C64) */
+struct ice_aqc_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define ICE_AQC_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define ICE_AQC_SET_CGU_OUT_CFG_DPLL_SRC_SEL ICE_M(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU output config (direct 0x0C65) */
+struct ice_aqc_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define ICE_AQC_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ ICE_M(0x1F, ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE \
+ ICE_M(0x7, ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ice_aqc_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ u8 dpll_state;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+ u8 config;
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0)
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT 5
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE \
+ ICE_M(0x7, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT)
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_FREERUN 0
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \
+ ICE_M(0x3, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ice_aqc_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT 5
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE \
+ ICE_M(0x7, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_FREERUN 0
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \
+ ICE_M(0x3, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ice_aqc_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ice_aqc_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+/* Get CGU info (direct 0x0C6A) */
+struct ice_aqc_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
/* Driver Shared Parameters (direct, 0x0C90) */
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
@@ -2139,16 +2396,6 @@ struct ice_aqc_driver_shared_params {
__le32 addr_low;
};
-enum ice_aqc_driver_params {
- /* OS clock index for PTP timer Domain 0 */
- ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0,
- /* OS clock index for PTP timer Domain 1 */
- ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1,
-
- /* Add new parameters above */
- ICE_AQC_DRIVER_PARAM_MAX = 16,
-};
-
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
@@ -2194,6 +2441,8 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
+ struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2234,6 +2483,15 @@ struct ice_aq_desc {
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_set_cgu_input_config set_cgu_input_config;
+ struct ice_aqc_get_cgu_input_config get_cgu_input_config;
+ struct ice_aqc_set_cgu_output_config set_cgu_output_config;
+ struct ice_aqc_get_cgu_output_config get_cgu_output_config;
+ struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
+ struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
+ struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
+ struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
+ struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
@@ -2358,6 +2616,8 @@ enum ice_adminq_opc {
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
+ ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2413,6 +2673,18 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ /* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_abilities = 0x0C61,
+ ice_aqc_opc_set_cgu_input_config = 0x0C62,
+ ice_aqc_opc_get_cgu_input_config = 0x0C63,
+ ice_aqc_opc_set_cgu_output_config = 0x0C64,
+ ice_aqc_opc_get_cgu_output_config = 0x0C65,
+ ice_aqc_opc_get_cgu_dpll_status = 0x0C66,
+ ice_aqc_opc_set_cgu_dpll_config = 0x0C67,
+ ice_aqc_opc_set_cgu_ref_prio = 0x0C68,
+ ice_aqc_opc_get_cgu_ref_prio = 0x0C69,
+ ice_aqc_opc_get_cgu_info = 0x0C6A,
+
ice_aqc_opc_driver_shared_params = 0x0C90,
/* Standalone Commands/Events */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 80deca45ab59..9a6c25f98632 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
#include "ice_common.h"
#include "ice_sched.h"
@@ -8,6 +8,7 @@
#include "ice_ptp_hw.h"
#define ICE_PF_RESET_WAIT_COUNT 300
+#define ICE_MAX_NETLIST_SIZE 10
static const char * const ice_link_mode_str_low[] = {
[0] = "100BASE_TX",
@@ -153,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E830_BACKPLANE:
+ case ICE_DEV_ID_E830_QSFP56:
+ case ICE_DEV_ID_E830_SFP:
+ case ICE_DEV_ID_E830_SFP_DD:
+ hw->mac_type = ICE_MAC_E830;
+ break;
default:
hw->mac_type = ICE_MAC_UNKNOWN;
break;
@@ -436,6 +443,80 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
}
/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get netlist node handle.
+ */
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return -EINTR;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ice_find_netlist_node
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Scan the netlist for a node handle of the given node type and part number.
+ *
+ * If node_handle is non-NULL it will be modified on function exit. It is only
+ * valid if the function returns zero, and should be ignored on any non-zero
+ * return value.
+ *
+ * Returns: 0 if the node is found, -ENOENT if no handle was found, and
+ * a negative error code on failure to access the AQ.
+ */
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ u8 idx;
+
+ for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 rec_node_part_number;
+ int status;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
+ node_type_ctx);
+ cmd.addr.topo_params.index = idx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number)
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/**
* ice_is_media_cage_present
* @pi: port information structure
*
@@ -571,6 +652,24 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
}
/**
+ * ice_get_link_status_datalen
+ * @hw: pointer to the HW struct
+ *
+ * Returns datalength for the Get Link Status AQ command, which is bigger for
+ * newer adapter families handled by ice driver.
+ */
+static u16 ice_get_link_status_datalen(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ return ICE_AQC_LS_DATA_SIZE_V2;
+ case ICE_MAC_E810:
+ default:
+ return ICE_AQC_LS_DATA_SIZE_V1;
+ }
+}
+
+/**
* ice_aq_get_link_info
* @pi: port information structure
* @ena_lse: enable/disable LinkStatusEvent reporting
@@ -608,8 +707,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
- status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
-
+ status = ice_aq_send_cmd(hw, &desc, &link_data,
+ ice_get_link_status_datalen(hw), cd);
if (status)
return status;
@@ -684,8 +783,7 @@ static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
struct ice_aqc_set_mac_cfg *cmd)
{
- u16 fc_thres_val, tx_timer_val;
- u32 val;
+ u32 val, fc_thres_m;
/* We read back the transmit timer and FC threshold value of
* LFC. Thus, we will use index =
@@ -694,19 +792,32 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* Also, because we are operating on transmit timer and FC
* threshold of LFC, we don't turn on any bit in tx_tmr_priority
*/
-#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
-
- /* Retrieve the transmit timer */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
- tx_timer_val = val &
- PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
- cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
-
- /* Retrieve the FC threshold */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
- fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
-
- cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
+#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
+#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR
+
+ if (hw->mac_type == ICE_MAC_E830) {
+ /* Retrieve the transmit timer */
+ val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
+ cmd->tx_tmr_value =
+ le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
+ fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
+ } else {
+ /* Retrieve the transmit timer */
+ val = rd32(hw,
+ E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
+ cmd->tx_tmr_value =
+ le16_encode_bits(val,
+ E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw,
+ E800_REFRESH_TMR(E800_IDX_OF_LFC));
+ fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
+ }
+ cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}
/**
@@ -2389,16 +2500,21 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
- u32 reg_val, val;
+ u32 reg_val, gsize, bsize;
reg_val = rd32(hw, GLQF_FD_SIZE);
- val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
- GLQF_FD_SIZE_FD_GSIZE_S;
- func_p->fd_fltr_guar =
- ice_get_num_per_func(hw, val);
- val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
- GLQF_FD_SIZE_FD_BSIZE_S;
- func_p->fd_fltr_best_effort = val;
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
+ bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
+ break;
+ case ICE_MAC_E810:
+ default:
+ gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
+ bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
+ }
+ func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
+ func_p->fd_fltr_best_effort = bsize;
ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
func_p->fd_fltr_guar);
@@ -2655,33 +2771,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_aq_get_netlist_node
- * @hw: pointer to the hw struct
- * @cmd: get_link_topo AQ structure
- * @node_part_number: output node part number if node found
- * @node_handle: output node handle parameter if node found
- */
-static int
-ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
- u8 *node_part_number, u16 *node_handle)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
-
- if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
- return -EIO;
-
- if (node_handle)
- *node_handle = le16_to_cpu(desc.params.get_link_topo.addr.handle);
- if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
-
- return 0;
-}
-
-/**
* ice_is_pf_c827 - check if pf contains c827 phy
* @hw: pointer to the hw struct
*/
@@ -2716,6 +2805,82 @@ bool ice_is_pf_c827(struct ice_hw *hw)
}
/**
+ * ice_is_phy_rclk_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the PHY Recovered Clock device is present in the netlist
+ */
+bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_is_clock_mux_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the Clock Multiplexer device is present in the netlist
+ */
+bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
+ NULL))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_is_cgu_in_netlist - check for CGU presence
+ * @hw: pointer to the hw struct
+ *
+ * Check if the Clock Generation Unit (CGU) device is present in the netlist.
+ * Save the CGU part number in the hw structure for later use.
+ * Return:
+ * * true - cgu is present
+ * * false - cgu is not present
+ */
+bool ice_is_cgu_in_netlist(struct ice_hw *hw)
+{
+ if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
+ NULL)) {
+ hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
+ return true;
+ } else if (!ice_find_netlist_node(hw,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
+ NULL)) {
+ hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_is_gps_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the GPS generic device is present in the netlist
+ */
+bool ice_is_gps_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
+ return false;
+
+ return true;
+}
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
@@ -4726,11 +4891,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- struct ice_aqc_dis_txq_item *qg_list;
+ DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
struct ice_hw *hw;
- u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
@@ -4748,11 +4913,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
return -EIO;
}
- buf_size = struct_size(qg_list, q_id, 1);
- qg_list = kzalloc(buf_size, GFP_KERNEL);
- if (!qg_list)
- return -ENOMEM;
-
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -4785,7 +4945,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
- kfree(qg_list);
return status;
}
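
The qg_list conversion above (and the matching one in ice_dis_vsi_rdma_qset() below) uses the new DEFINE_FLEX() helper: instead of kzalloc(struct_size(...)) plus kfree() for a one-element flexible array, the object now lives zero-initialized on the stack and __struct_size() reports its known size. A minimal sketch of the pattern with an invented struct (not part of this patch):

/* Illustration only: on-stack flexible-array allocation via DEFINE_FLEX() */
struct demo_txq_item {
	u16 num_qs;
	__le16 q_id[];			/* flexible array member */
};

static void demo_define_flex_usage(void)
{
	/* Zeroed stack storage with room for one q_id element */
	DEFINE_FLEX(struct demo_txq_item, item, q_id, 1);
	u16 buf_size = __struct_size(item);	/* sizeof(*item) + sizeof(__le16) */

	item->num_qs = 1;
	item->q_id[0] = cpu_to_le16(42);
	/* pass item and buf_size to the AQ call; nothing to free afterwards */
}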
@@ -4954,10 +5113,10 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- struct ice_aqc_dis_txq_item *qg_list;
+ DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
- u16 qg_size;
int i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
@@ -4965,11 +5124,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
hw = pi->hw;
- qg_size = struct_size(qg_list, q_id, 1);
- qg_list = kzalloc(qg_size, GFP_KERNEL);
- if (!qg_list)
- return -ENOMEM;
-
mutex_lock(&pi->sched_lock);
for (i = 0; i < count; i++) {
@@ -4994,7 +5148,395 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
mutex_unlock(&pi->sched_lock);
- kfree(qg_list);
+ return status;
+}
+
+/**
+ * ice_aq_get_cgu_abilities - get cgu abilities
+ * @hw: pointer to the HW struct
+ * @abilities: CGU abilities
+ *
+ * Get CGU abilities (0x0C61)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+ struct ice_aqc_get_cgu_abilities *abilities)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
+ return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
+}
+
+/**
+ * ice_aq_set_input_pin_cfg - set input pin config
+ * @hw: pointer to the HW struct
+ * @input_idx: Input index
+ * @flags1: Input flags
+ * @flags2: Input flags
+ * @freq: Frequency in Hz
+ * @phase_delay: Delay in ps
+ *
+ * Set CGU input config (0x0C62)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+ u32 freq, s32 phase_delay)
+{
+ struct ice_aqc_set_cgu_input_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
+ cmd = &desc.params.set_cgu_input_config;
+ cmd->input_idx = input_idx;
+ cmd->flags1 = flags1;
+ cmd->flags2 = flags2;
+ cmd->freq = cpu_to_le32(freq);
+ cmd->phase_delay = cpu_to_le32(phase_delay);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_input_pin_cfg - get input pin config
+ * @hw: pointer to the HW struct
+ * @input_idx: Input index
+ * @status: Pin status
+ * @type: Pin type
+ * @flags1: Input flags
+ * @flags2: Input flags
+ * @freq: Frequency in Hz
+ * @phase_delay: Delay in ps
+ *
+ * Get CGU input config (0x0C63)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
+ u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
+{
+ struct ice_aqc_get_cgu_input_config *cmd;
+ struct ice_aq_desc desc;
+ int ret;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
+ cmd = &desc.params.get_cgu_input_config;
+ cmd->input_idx = input_idx;
+
+ ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!ret) {
+ if (status)
+ *status = cmd->status;
+ if (type)
+ *type = cmd->type;
+ if (flags1)
+ *flags1 = cmd->flags1;
+ if (flags2)
+ *flags2 = cmd->flags2;
+ if (freq)
+ *freq = le32_to_cpu(cmd->freq);
+ if (phase_delay)
+ *phase_delay = le32_to_cpu(cmd->phase_delay);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_set_output_pin_cfg - set output pin config
+ * @hw: pointer to the HW struct
+ * @output_idx: Output index
+ * @flags: Output flags
+ * @src_sel: Index of DPLL block
+ * @freq: Output frequency
+ * @phase_delay: Output phase compensation
+ *
+ * Set CGU output config (0x0C64)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+ u8 src_sel, u32 freq, s32 phase_delay)
+{
+ struct ice_aqc_set_cgu_output_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
+ cmd = &desc.params.set_cgu_output_config;
+ cmd->output_idx = output_idx;
+ cmd->flags = flags;
+ cmd->src_sel = src_sel;
+ cmd->freq = cpu_to_le32(freq);
+ cmd->phase_delay = cpu_to_le32(phase_delay);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_output_pin_cfg - get output pin config
+ * @hw: pointer to the HW struct
+ * @output_idx: Output index
+ * @flags: Output flags
+ * @src_sel: Internal DPLL source
+ * @freq: Output frequency
+ * @src_freq: Source frequency
+ *
+ * Get CGU output config (0x0C65)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+ u8 *src_sel, u32 *freq, u32 *src_freq)
+{
+ struct ice_aqc_get_cgu_output_config *cmd;
+ struct ice_aq_desc desc;
+ int ret;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
+ cmd = &desc.params.get_cgu_output_config;
+ cmd->output_idx = output_idx;
+
+ ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!ret) {
+ if (flags)
+ *flags = cmd->flags;
+ if (src_sel)
+ *src_sel = cmd->src_sel;
+ if (freq)
+ *freq = le32_to_cpu(cmd->freq);
+ if (src_freq)
+ *src_freq = le32_to_cpu(cmd->src_freq);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_cgu_dpll_status - get dpll status
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_state: Reference clock state
+ * @config: current DPLL config
+ * @dpll_state: current DPLL state
+ * @phase_offset: Phase offset in ns
+ * @eec_mode: EEC_mode
+ *
+ * Get CGU DPLL status (0x0C66)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+ u8 *dpll_state, u8 *config, s64 *phase_offset,
+ u8 *eec_mode)
+{
+ struct ice_aqc_get_cgu_dpll_status *cmd;
+ const s64 nsec_per_psec = 1000LL;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
+ cmd = &desc.params.get_cgu_dpll_status;
+ cmd->dpll_num = dpll_num;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *ref_state = cmd->ref_state;
+ *dpll_state = cmd->dpll_state;
+ *config = cmd->config;
+ *phase_offset = le32_to_cpu(cmd->phase_offset_h);
+ *phase_offset <<= 32;
+ *phase_offset += le32_to_cpu(cmd->phase_offset_l);
+ *phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
+ nsec_per_psec);
+ *eec_mode = cmd->eec_mode;
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_set_cgu_dpll_config - set dpll config
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_state: Reference clock state
+ * @config: DPLL config
+ * @eec_mode: EEC mode
+ *
+ * Set CGU DPLL config (0x0C67)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+ u8 config, u8 eec_mode)
+{
+ struct ice_aqc_set_cgu_dpll_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
+ cmd = &desc.params.set_cgu_dpll_config;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_state = ref_state;
+ cmd->config = config;
+ cmd->eec_mode = eec_mode;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_set_cgu_ref_prio - set input reference priority
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_idx: Reference pin index
+ * @ref_priority: Reference input priority
+ *
+ * Set CGU reference priority (0x0C68)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 ref_priority)
+{
+ struct ice_aqc_set_cgu_ref_prio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
+ cmd = &desc.params.set_cgu_ref_prio;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_idx = ref_idx;
+ cmd->ref_priority = ref_priority;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_cgu_ref_prio - get input reference priority
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_idx: Reference pin index
+ * @ref_prio: Reference input priority
+ *
+ * Get CGU reference priority (0x0C69)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 *ref_prio)
+{
+ struct ice_aqc_get_cgu_ref_prio *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
+ cmd = &desc.params.get_cgu_ref_prio;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_idx = ref_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *ref_prio = cmd->ref_priority;
+
+ return status;
+}
+
+/**
+ * ice_aq_get_cgu_info - get cgu info
+ * @hw: pointer to the HW struct
+ * @cgu_id: CGU ID
+ * @cgu_cfg_ver: CGU config version
+ * @cgu_fw_ver: CGU firmware version
+ *
+ * Get CGU info (0x0C6A)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+ u32 *cgu_fw_ver)
+{
+ struct ice_aqc_get_cgu_info *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
+ cmd = &desc.params.get_cgu_info;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *cgu_id = le32_to_cpu(cmd->cgu_id);
+ *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
+ *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_set_phy_rec_clk_out - set RCLK phy out
+ * @hw: pointer to the HW struct
+ * @phy_output: PHY reference clock output pin
+ * @enable: true to enable the recovered clock output
+ * @freq: PHY output frequency
+ *
+ * Set phy recovered clock as reference (0x0630)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+ u32 *freq)
+{
+ struct ice_aqc_set_phy_rec_clk_out *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
+ cmd = &desc.params.set_phy_rec_clk_out;
+ cmd->phy_output = phy_output;
+ cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
+ cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
+ cmd->freq = cpu_to_le32(*freq);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *freq = le32_to_cpu(cmd->freq);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
+ * @hw: pointer to the HW struct
+ * @phy_output: PHY reference clock output pin
+ * @port_num: Port number
+ * @flags: PHY flags
+ * @node_handle: PHY node handle
+ *
+ * Get PHY recovered clock output info (0x0631)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
+ u8 *flags, u16 *node_handle)
+{
+ struct ice_aqc_get_phy_rec_clk_out *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
+ cmd = &desc.params.get_phy_rec_clk_out;
+ cmd->phy_output = *phy_output;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *phy_output = cmd->phy_output;
+ if (port_num)
+ *port_num = cmd->port_num;
+ if (flags)
+ *flags = cmd->flags;
+ if (node_handle)
+ *node_handle = le16_to_cpu(cmd->node_handle);
+ }
+
return status;
}
@@ -5268,81 +5810,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
- * ice_aq_set_driver_param - Set driver parameter to share via firmware
- * @hw: pointer to the HW struct
- * @idx: parameter index to set
- * @value: the value to set the parameter to
- * @cd: pointer to command details structure or NULL
- *
- * Set the value of one of the software defined parameters. All PFs connected
- * to this device can read the value using ice_aq_get_driver_param.
- *
- * Note that firmware provides no synchronization or locking, and will not
- * save the parameter value during a device reset. It is expected that
- * a single PF will write the parameter value, while all other PFs will only
- * read it.
- */
-int
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 value, struct ice_sq_cd *cd)
-{
- struct ice_aqc_driver_shared_params *cmd;
- struct ice_aq_desc desc;
-
- if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
- return -EIO;
-
- cmd = &desc.params.drv_shared_params;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
-
- cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
- cmd->param_indx = idx;
- cmd->param_val = cpu_to_le32(value);
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
- * ice_aq_get_driver_param - Get driver parameter shared via firmware
- * @hw: pointer to the HW struct
- * @idx: parameter index to set
- * @value: storage to return the shared parameter
- * @cd: pointer to command details structure or NULL
- *
- * Get the value of one of the software defined parameters.
- *
- * Note that firmware provides no synchronization or locking. It is expected
- * that only a single PF will write a given parameter.
- */
-int
-ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 *value, struct ice_sq_cd *cd)
-{
- struct ice_aqc_driver_shared_params *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
- return -EIO;
-
- cmd = &desc.params.drv_shared_params;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
-
- cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
- cmd->param_indx = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
- if (status)
- return status;
-
- *value = le32_to_cpu(cmd->param_val);
-
- return 0;
-}
-
-/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
@@ -5643,6 +6110,7 @@ static const u32 ice_aq_to_link_speed[] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
+ SPEED_200000,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 226b81f97a92..31fdcac33986 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -93,6 +93,13 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
bool ice_is_pf_c827(struct ice_hw *hw);
+bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
+bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
+bool ice_is_cgu_in_netlist(struct ice_hw *hw);
+bool ice_is_gps_in_netlist(struct ice_hw *hw);
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
@@ -196,6 +203,44 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+ struct ice_aqc_get_cgu_abilities *abilities);
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+ u32 freq, s32 phase_delay);
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
+ u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay);
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+ u8 src_sel, u32 freq, s32 phase_delay);
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+ u8 *src_sel, u32 *freq, u32 *src_freq);
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+ u8 *dpll_state, u8 *config, s64 *phase_offset,
+ u8 *eec_mode);
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+ u8 config, u8 eec_mode);
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 ref_priority);
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 *ref_prio);
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+ u32 *cgu_fw_ver);
+
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+ u32 *freq);
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
+ u8 *flags, u16 *node_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
@@ -208,12 +253,6 @@ int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 value, struct ice_sq_cd *cd);
-int
-ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 *value, struct ice_sq_cd *cd);
-int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index b27ec93638b6..cfb1580f5850 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1201,23 +1201,120 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
- * ice_dwnld_cfg_bufs
+ * ice_get_pkg_seg_by_idx
+ * @pkg_hdr: pointer to the package header to be searched
+ * @idx: index of segment
+ */
+static struct ice_generic_seg_hdr *
+ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ if (idx < le32_to_cpu(pkg_hdr->seg_count))
+ return (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr +
+ le32_to_cpu(pkg_hdr->seg_offset[idx]));
+
+ return NULL;
+}
+
+/**
+ * ice_is_signing_seg_at_idx - determine if segment is a signing segment
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ */
+static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ struct ice_generic_seg_hdr *seg;
+
+ seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg)
+ return false;
+
+ return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING;
+}
+
+/**
+ * ice_is_signing_seg_type_at_idx
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ *
+ * Determine if a segment is a signing segment of the correct type
+ */
+static bool
+ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
+ u32 seg_id, u32 sign_type)
+{
+ struct ice_sign_seg *seg;
+
+ if (!ice_is_signing_seg_at_idx(pkg_hdr, idx))
+ return false;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+
+ if (seg && le32_to_cpu(seg->seg_id) == seg_id &&
+ le32_to_cpu(seg->sign_type) == sign_type)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_is_last_download_buffer
+ * @buf: pointer to current buffer header
+ * @idx: index of the buffer in the current sequence
+ * @count: the buffer count in the current sequence
+ *
+ * Determine if the current buffer is the last buffer to be downloaded, either
+ * because it is the final buffer in the sequence or because the next buffer
+ * carries the metadata flag.
+ */
+static bool
+ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
+{
+ struct ice_buf *next_buf;
+
+ if ((idx + 1) == count)
+ return true;
+
+ /* A set metadata flag in the next buffer will signal that the current
+ * buffer will be the last buffer downloaded
+ */
+ next_buf = ((struct ice_buf *)buf) + 1;
+
+ return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
+}
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ * @indicate_last: if true, then set last buffer flag on last buffer download
*
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
*/
-static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
- struct ice_buf *bufs, u32 count)
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
+ u32 count, bool indicate_last)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_buf_hdr *bh;
enum ice_aq_err err;
u32 offset, info, i;
- int status;
if (!bufs || !count)
return ICE_DDP_PKG_ERR;
@@ -1226,43 +1323,25 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
* then there are no buffers to be downloaded, and the operation is
* considered a success.
*/
- bh = (struct ice_buf_hdr *)bufs;
+ bh = (struct ice_buf_hdr *)(bufs + start);
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
return ICE_DDP_PKG_SUCCESS;
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == -EALREADY)
- return ICE_DDP_PKG_ALREADY_LOADED;
- return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
- }
-
for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
+ bool last = false;
+ int status;
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
+ bh = (struct ice_buf_hdr *)(bufs + start + i);
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (le16_to_cpu(bh->section_count))
- if (le32_to_cpu(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
-
- bh = (struct ice_buf_hdr *)(bufs + i);
+ if (indicate_last)
+ last = ice_is_last_download_buffer(bh, i, count);
status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
&offset, &info, NULL);
/* Save AQ status from download package */
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Pkg download failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
err = hw->adminq.sq_last_status;
state = ice_map_aq_err_to_ddp_state(err);
@@ -1273,72 +1352,196 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
break;
}
- if (!status) {
- status = ice_set_vlan_mode(hw);
- if (status)
- ice_debug(hw, ICE_DBG_PKG,
- "Failed to set VLAN mode: err %d\n", status);
+ return state;
+}
+
+/**
+ * ice_download_pkg_sig_seg - download a signature segment
+ * @hw: pointer to the hardware structure
+ * @seg: pointer to signature segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+{
+ return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
+ le32_to_cpu(seg->buf_tbl.buf_count),
+ false);
+}
+
+/**
+ * ice_download_pkg_config_seg - download a config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @start: starting buffer
+ * @count: buffer count
+ *
+ * Note: idx must reference an ICE segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx, u32 start, u32 count)
+{
+ struct ice_buf_table *bufs;
+ struct ice_seg *seg;
+ u32 buf_count;
+
+ seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg)
+ return ICE_DDP_PKG_ERR;
+
+ bufs = ice_find_buf_table(seg);
+ buf_count = le32_to_cpu(bufs->buf_count);
+
+ if (start >= buf_count || start + count > buf_count)
+ return ICE_DDP_PKG_ERR;
+
+ return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
+ true);
+}
+
+/**
+ * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index (must be a signature segment)
+ *
+ * Note: idx must reference a signature segment
+ */
+static enum ice_ddp_state
+ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx)
+{
+ enum ice_ddp_state state;
+ struct ice_sign_seg *seg;
+ u32 conf_idx;
+ u32 start;
+ u32 count;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg) {
+ state = ICE_DDP_PKG_ERR;
+ goto exit;
}
- ice_release_global_cfg_lock(hw);
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+ count = le32_to_cpu(seg->signed_buf_count);
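+ /* Download the signature segment first, then the range of
+ * configuration buffers it covers.
+ */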
+ state = ice_download_pkg_sig_seg(hw, seg);
+ if (state)
+ goto exit;
+
+ state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ count);
+
+exit:
return state;
}
/**
- * ice_aq_get_pkg_info_list
+ * ice_match_signing_seg - determine if a matching signing segment exists
+ * @pkg_hdr: pointer to package header
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ */
+static bool
+ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
+{
+ u32 i;
+
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
+ sign_type))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_post_dwnld_pkg_actions - perform post download package actions
* @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
- *
- * Get Package Info List (0x0C43)
*/
-static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
+static enum ice_ddp_state
+ice_post_dwnld_pkg_actions(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ int status;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+ status = ice_set_vlan_mode(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ return ICE_DDP_PKG_ERR;
+ }
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+ return ICE_DDP_PKG_SUCCESS;
}
/**
* ice_download_pkg
* @hw: pointer to the hardware structure
- * @ice_seg: pointer to the segment of the package to be downloaded
+ * @pkg_hdr: pointer to package header
*
* Handles the download of a complete package.
*/
-static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
- struct ice_seg *ice_seg)
+static enum ice_ddp_state
+ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- struct ice_buf_table *ice_buf_tbl;
+ enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
int status;
+ u32 i;
- ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_format_ver.major,
- ice_seg->hdr.seg_format_ver.minor,
- ice_seg->hdr.seg_format_ver.update,
- ice_seg->hdr.seg_format_ver.draft);
+ ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
+ ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
- ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
- le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ state = ICE_DDP_PKG_ALREADY_LOADED;
+ else
+ state = ice_map_aq_err_to_ddp_state(aq_err);
+ return state;
+ }
- ice_buf_tbl = ice_find_buf_table(ice_seg);
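+ /* Walk all package segments and download only the signing segments
+ * matching this device's segment id and sign type, together with
+ * the configuration buffers they sign.
+ */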
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
+ hw->pkg_sign_type))
+ continue;
- ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
- le32_to_cpu(ice_buf_tbl->buf_count));
+ state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ if (state)
+ break;
+ }
- status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+ ice_release_global_cfg_lock(hw);
ice_post_pkg_dwnld_vlan_mode_cfg(hw);
- return status;
+ return state;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}
/**
@@ -1498,6 +1701,73 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
+ * ice_has_signing_seg - determine if package has a signing segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ */
+static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
+
+ return seg_hdr ? true : false;
+}
+
+/**
+ * ice_get_pkg_segment_id - get correct package segment id, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
+{
+ u32 seg_id;
+
+ switch (mac_type) {
+ case ICE_MAC_E830:
+ seg_id = SEGMENT_TYPE_ICE_E830;
+ break;
+ case ICE_MAC_GENERIC:
+ default:
+ seg_id = SEGMENT_TYPE_ICE_E810;
+ break;
+ }
+
+ return seg_id;
+}
+
+/**
+ * ice_get_pkg_sign_type - get package segment sign type, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
+{
+ u32 sign_type;
+
+ switch (mac_type) {
+ case ICE_MAC_E830:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
+ break;
+ case ICE_MAC_GENERIC:
+ default:
+ sign_type = SEGMENT_SIGN_TYPE_RSA2K;
+ break;
+ }
+
+ return sign_type;
+}
+
+/**
+ * ice_get_signing_req - get correct package requirements, based on device
+ * @hw: pointer to the hardware structure
+ */
+static void ice_get_signing_req(struct ice_hw *hw)
+{
+ hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
+ hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
+}
+
+/**
* ice_init_pkg_info
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to the driver's package hdr
@@ -1512,7 +1782,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
if (!pkg_hdr)
return ICE_DDP_PKG_ERR;
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
+ ice_get_signing_req(hw);
+
+ ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
+ hw->pkg_seg_id);
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
if (seg_hdr) {
struct ice_meta_sect *meta;
struct ice_pkg_enum state;
@@ -1560,21 +1837,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- u16 size;
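+ /* DEFINE_FLEX() places the flexible-array response buffer on the
+ * stack, so no kzalloc()/kfree() pair is needed here.
+ */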
+ DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
+ u16 size = __struct_size(pkg_info);
u32 i;
- size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = kzalloc(size, GFP_KERNEL);
- if (!pkg_info)
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
return ICE_DDP_PKG_ERR;
- if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
- state = ICE_DDP_PKG_ERR;
- goto init_pkg_free_alloc;
- }
-
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
@@ -1604,10 +1874,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
pkg_info->pkg_info[i].name, flags);
}
-init_pkg_free_alloc:
- kfree(pkg_info);
-
- return state;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1622,9 +1889,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- struct ice_aqc_get_pkg_info_resp *pkg;
+ DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
+ u16 size = __struct_size(pkg);
enum ice_ddp_state state;
- u16 size;
u32 i;
/* Check package version compatibility */
@@ -1635,7 +1903,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}
/* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
ospkg);
if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
@@ -1643,15 +1911,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}
/* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = kzalloc(size, GFP_KERNEL);
- if (!pkg)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
- state = ICE_DDP_PKG_LOAD_ERROR;
- goto fw_ddp_compat_free_alloc;
- }
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
+ return ICE_DDP_PKG_LOAD_ERROR;
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
@@ -1668,8 +1929,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
/* done processing NVM package so break */
break;
}
-fw_ddp_compat_free_alloc:
- kfree(pkg);
+
return state;
}
@@ -1809,6 +2069,11 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
if (state)
return state;
+ /* must be a matching segment */
+ if (hw->pkg_has_signing_seg &&
+ !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type))
+ return ICE_DDP_PKG_ERR;
+
/* before downloading the package, check package version for
* compatibility with driver
*/
@@ -1818,7 +2083,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- state = ice_download_pkg(hw, seg);
+ state = ice_download_pkg(hw, pkg);
if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT,
"package previously loaded - no work.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index abb5f32f2ef4..ff66c2ffb1a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -98,10 +98,21 @@ struct ice_pkg_hdr {
__le32 seg_offset[];
};
+/* Package signing algorithm types */
+#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
+#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
+#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
+#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
+#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005
+
/* generic segment */
struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE 0x00000010
+#define SEGMENT_TYPE_INVALID 0x00000000
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE_E810 0x00000010
+#define SEGMENT_TYPE_SIGNING 0x00001001
+#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
+#define SEGMENT_TYPE_ICE_E830 0x00000017
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
@@ -163,6 +174,18 @@ struct ice_global_metadata_seg {
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
+struct ice_sign_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 seg_id;
+ __le32 sign_type;
+ __le32 signed_seg_idx;
+ __le32 signed_buf_start;
+ __le32 signed_buf_count;
+#define ICE_SIGN_SEG_RESERVED_COUNT 44
+ u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
+ struct ice_buf_table buf_tbl;
+};
+
/* section information */
struct ice_section_entry {
__le32 type;
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 6d560d1c74a4..a2d384dbfc76 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
#ifndef _ICE_DEVIDS_H_
#define _ICE_DEVIDS_H_
@@ -16,6 +16,14 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-C for SFP-DD */
+#define ICE_DEV_ID_E830_SFP_DD 0x12D4
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
new file mode 100644
index 000000000000..835c419ccc74
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -0,0 +1,2120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_trace.h"
+#include <linux/dpll.h>
+
+#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
+#define ICE_DPLL_PIN_IDX_INVALID 0xff
+#define ICE_DPLL_RCLK_NUM_PER_PF 1
+
+/**
+ * enum ice_dpll_pin_type - enumerate ice pin types:
+ * @ICE_DPLL_PIN_INVALID: invalid pin type
+ * @ICE_DPLL_PIN_TYPE_INPUT: input pin
+ * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
+ * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ */
+enum ice_dpll_pin_type {
+ ICE_DPLL_PIN_INVALID,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+};
+
+static const char * const pin_type_name[] = {
+ [ICE_DPLL_PIN_TYPE_INPUT] = "input",
+ [ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
+ [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+};
+
+/**
+ * ice_dpll_pin_freq_set - set pin's frequency
+ * @pf: private board structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being configured
+ * @freq: frequency to be set
+ * @extack: error reporting
+ *
+ * Set requested frequency on a pin.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error on AQ or wrong pin type given
+ */
+static int
+ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type, const u32 freq,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ flags = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, pin->idx, flags,
+ pin->flags[0], freq, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, pin->idx, flags,
+ 0, freq, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ freq, pin->idx);
+ return ret;
+ }
+ pin->freq = freq;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_frequency_set - wrapper for pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ * @pin_type: type of pin being configured
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 frequency,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_frequency_set - input pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_input_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_frequency_set - output pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_output_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_frequency_get - wrapper for pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ * @pin_type: type of pin being configured
+ *
+ * Wraps internal get frequency command of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *frequency = p->freq;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_frequency_get - input pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Wraps internal get frequency command of an input pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_input_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_frequency_get - output pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Wraps internal get frequency command of a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_pin_enable - enable a pin on dplls
+ * @hw: board private hw structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being enabled
+ * @extack: error reporting
+ *
+ * Enable a pin on both dplls, preserving the embedded sync setting carried
+ * in pin->flags.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags = 0;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to enable %s pin:%u\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_disable - disable a pin on dplls
+ * @hw: board private hw structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being disabled
+ * @extack: error reporting
+ *
+ * Disable a pin on both dplls, preserving the embedded sync setting carried
+ * in pin->flags.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags = 0;
+ int ret;
+
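+ /* Leave the enable bit cleared while preserving the embedded sync
+ * flag, which disables the pin on both dplls.
+ */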
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to disable %s pin:%u\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_state_update - update pin's state
+ * @pf: private board struct
+ * @pin: structure with pin attributes to be updated
+ * @pin_type: type of pin being updated
+ * @extack: error reporting
+ *
+ * Determine the pin's current state and frequency, then update the struct
+ * holding the pin info. For input pins the state is tracked separately for
+ * each dpll, for rclk pins it is tracked separately for each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 parent, port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
+ NULL, &pin->flags[0],
+ &pin->freq, NULL);
+ if (ret)
+ goto err;
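+ /* An enabled input is CONNECTED on the dpll for which it is the
+ * active reference and SELECTABLE otherwise; a disabled input is
+ * DISCONNECTED on both dplls.
+ */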
+ if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
+ if (pin->pin) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ pin->pin == pf->dplls.eec.active_input ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ pin->pin == pf->dplls.pps.active_input ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_SELECTABLE;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_SELECTABLE;
+ }
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
+ &pin->flags[0], NULL,
+ &pin->freq, NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
+ pin->state[0] = DPLL_PIN_STATE_CONNECTED;
+ else
+ pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+ break;
+ case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
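+ /* Recovered clock pin state is tracked per parent PHY; query
+ * the recovered clock output for each parent.
+ */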
+ for (parent = 0; parent < pf->dplls.rclk.num_parents;
+ parent++) {
+ u8 p = parent;
+
+ ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p,
+ &port_num,
+ &pin->flags[parent],
+ NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
+ pin->flags[parent])
+ pin->state[parent] = DPLL_PIN_STATE_CONNECTED;
+ else
+ pin->state[parent] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+err:
+ if (extack)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to update %s pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+ else
+ dev_err_ratelimited(ice_pf_to_dev(pf),
+ "err:%d %s failed to update %s pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+ return ret;
+}
+
+/**
+ * ice_dpll_hw_input_prio_set - set input priority value in hardware
+ * @pf: board private structure
+ * @dpll: ice dpll pointer
+ * @pin: ice pin pointer
+ * @prio: priority value being set on a dpll
+ * @extack: error reporting
+ *
+ * Internal wrapper for setting the priority in the hardware.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
+ struct ice_dpll_pin *pin, const u32 prio,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ ret = ice_aq_set_cgu_ref_prio(&pf->hw, dpll->dpll_idx, pin->idx,
+ (u8)prio);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ prio, pin->idx);
+ else
+ dpll->input_prio[pin->idx] = prio;
+
+ return ret;
+}
+
+/**
+ * ice_dpll_lock_status_get - get dpll lock status callback
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @status: on success holds dpll's lock status
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, provides dpll's lock status.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *status = d->dpll_state;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_mode_supported - check if dpll's working mode is supported
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @mode: mode to be checked for support
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Reports whether the given working mode is
+ * supported by the dpll.
+ *
+ * Return:
+ * * true - mode is supported
+ * * false - mode is not supported
+ */
+static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ if (mode == DPLL_MODE_AUTOMATIC)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_dpll_mode_get - get dpll's working mode
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @mode: on success holds current working mode of dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides working mode of dpll.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *mode = d->mode;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_state_set - set pin's state on dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @enable: if pin shall be enabled
+ * @extack: error reporting
+ * @pin_type: type of a pin
+ *
+ * Set pin state on a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - OK or no change required
+ * * negative - error
+ */
+static int
+ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ bool enable, struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_state_set - enable/disable output pin on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: dpll being configured
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: state of pin to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set given state on output type pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - successfully enabled mode
+ * * negative - failed to enable mode
+ */
+static int
+ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ bool enable = state == DPLL_PIN_STATE_CONNECTED;
+
+ return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_input_state_set - enable/disable input pin on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: dpll being configured
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: state of pin to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set given state on an input type pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - successfully enabled mode
+ * * negative - failed to enable mode
+ */
+static int
+ice_dpll_input_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ bool enable = state == DPLL_PIN_STATE_SELECTABLE;
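+ /* Only a request for DPLL_PIN_STATE_SELECTABLE enables an input
+ * pin; any other requested state disables it.
+ */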
+
+ return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_pin_state_get - get pin's state on dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ * @pin_type: type of questioned pin
+ *
+ * Determine the pin's current state and report it back to the caller.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
+ if (ret)
+ goto unlock;
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ *state = p->state[d->dpll_idx];
+ else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
+ *state = p->state[0];
+ ret = 0;
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_state_get - get output pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_output_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_input_state_get - get input pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of an input pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_input_prio_get - get dpll's input prio
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: on success - returns input priority on dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting priority of an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *prio = d->input_prio[p->idx];
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_prio_set - set dpll input prio
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: input priority to be set on dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting priority of an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (prio > ICE_DPLL_PRIO_MAX) {
+ NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d",
+ ICE_DPLL_PRIO_MAX);
+ return -EINVAL;
+ }
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_direction - callback for get input pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: holds input pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of an input pin.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int
+ice_dpll_input_direction(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_INPUT;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_output_direction - callback for get output pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: holds output pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of an output pin.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int
+ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds pin phase_adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting phase adjust value of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *phase_adjust = p->phase_adjust;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_phase_adjust_set - helper for setting a pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ * @type: type of a pin
+ *
+ * Helper for dpll subsystem callback. Handler for setting phase adjust value
+ * of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flag, flags_en = 0;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
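+ /* preserve the current enable flags while requesting the phase update */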
+ switch (type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ flag = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY;
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, flag, flags_en,
+ 0, phase_adjust);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flag = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE;
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN)
+ flag |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flag |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flag, 0, 0,
+ phase_adjust);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ p->phase_adjust = phase_adjust;
+ mutex_unlock(&pf->dplls.lock);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ phase_adjust, p->idx, d->dpll_idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_phase_adjust_set - callback for set input pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on input
+ * pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_phase_adjust_set - callback for set output pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on output
+ * pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
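+/* Factor for scaling the HW phase offset readout to the resolution expected
+ * by the dpll subsystem.
+ */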
+#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
+#define ICE_DPLL_PHASE_OFFSET_FACTOR \
+ (DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
+/**
+ * ice_dpll_phase_offset_get - callback for get dpll phase shift value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_offset: on success holds pin phase_offset value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting phase shift value between
+ * dpll's input and output.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->active_input == pin)
+ *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else
+ *phase_offset = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @parent_pin: pin parent pointer
+ * @parent_pin_priv: parent private data pointer passed on pin registration
+ * @state: state to be set on pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, sets the state of an rclk pin on a parent pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_pf *pf = p->pf;
+ int ret = -EINVAL;
+ u32 hw_idx;
+
+ mutex_lock(&pf->dplls.lock);
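+ /* translate the parent pin index into a recovered clock HW index */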
+ hw_idx = parent->idx - pf->dplls.base_rclk_idx;
+ if (hw_idx >= pf->dplls.num_inputs)
+ goto unlock;
+
+ if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) ||
+ (!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "pin:%u state:%u on parent:%u already set",
+ p->idx, state, parent->idx);
+ goto unlock;
+ }
+ ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable,
+ &p->freq);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ state, p->idx, parent->idx);
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_rclk_state_on_pin_get - get a state of rclk pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @parent_pin: pin parent pointer
+ * @parent_pin_priv: pin parent priv data pointer passed on pin registration
+ * @state: on success holds pin state on parent pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, gets the state of a recovered clock pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = -EINVAL;
+ u32 hw_idx;
+
+ mutex_lock(&pf->dplls.lock);
+ hw_idx = parent->idx - pf->dplls.base_rclk_idx;
+ if (hw_idx >= pf->dplls.num_inputs)
+ goto unlock;
+
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+
+ *state = p->state[hw_idx];
+ ret = 0;
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+static const struct dpll_pin_ops ice_dpll_rclk_ops = {
+ .state_on_pin_set = ice_dpll_rclk_state_on_pin_set,
+ .state_on_pin_get = ice_dpll_rclk_state_on_pin_get,
+ .direction_get = ice_dpll_input_direction,
+};
+
+static const struct dpll_pin_ops ice_dpll_input_ops = {
+ .frequency_get = ice_dpll_input_frequency_get,
+ .frequency_set = ice_dpll_input_frequency_set,
+ .state_on_dpll_get = ice_dpll_input_state_get,
+ .state_on_dpll_set = ice_dpll_input_state_set,
+ .prio_get = ice_dpll_input_prio_get,
+ .prio_set = ice_dpll_input_prio_set,
+ .direction_get = ice_dpll_input_direction,
+ .phase_adjust_get = ice_dpll_pin_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_input_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_output_ops = {
+ .frequency_get = ice_dpll_output_frequency_get,
+ .frequency_set = ice_dpll_output_frequency_set,
+ .state_on_dpll_get = ice_dpll_output_state_get,
+ .state_on_dpll_set = ice_dpll_output_state_set,
+ .direction_get = ice_dpll_output_direction,
+ .phase_adjust_get = ice_dpll_pin_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_output_phase_adjust_set,
+};
+
+static const struct dpll_device_ops ice_dpll_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_supported = ice_dpll_mode_supported,
+ .mode_get = ice_dpll_mode_get,
+};
+
+/**
+ * ice_generate_clock_id - generates unique clock_id for registering dpll.
+ * @pf: board private structure
+ *
+ * Generates unique (per board) clock_id for allocation and search of dpll
+ * devices in Linux dpll subsystem.
+ *
+ * Return: generated clock id for the board
+ */
+static u64 ice_generate_clock_id(struct ice_pf *pf)
+{
+ return pci_get_dsn(pf->pdev);
+}
+
+/**
+ * ice_dpll_notify_changes - notify dpll subsystem about changes
+ * @d: pointer to dpll
+ *
+ * Once a change is detected, an appropriate event is submitted to the dpll
+ * subsystem.
+ */
+static void ice_dpll_notify_changes(struct ice_dpll *d)
+{
+ bool pin_notified = false;
+
+ if (d->prev_dpll_state != d->dpll_state) {
+ d->prev_dpll_state = d->dpll_state;
+ dpll_device_change_ntf(d->dpll);
+ }
+ if (d->prev_input != d->active_input) {
+ if (d->prev_input)
+ dpll_pin_change_ntf(d->prev_input);
+ d->prev_input = d->active_input;
+ if (d->active_input) {
+ dpll_pin_change_ntf(d->active_input);
+ pin_notified = true;
+ }
+ }
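+ /* a phase offset change is reported via the active input pin */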
+ if (d->prev_phase_offset != d->phase_offset) {
+ d->prev_phase_offset = d->phase_offset;
+ if (!pin_notified && d->active_input)
+ dpll_pin_change_ntf(d->active_input);
+ }
+}
+
+/**
+ * ice_dpll_update_state - update dpll state
+ * @pf: pf private structure
+ * @d: pointer to queried dpll device
+ * @init: true if the function is called during initialization of ice dpll
+ *
+ * Poll current state of dpll from hw and update ice_dpll struct.
+ *
+ * Context: Called by kworker under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - AQ failure
+ */
+static int
+ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
+{
+ struct ice_dpll_pin *p = NULL;
+ int ret;
+
+ ret = ice_get_cgu_state(&pf->hw, d->dpll_idx, d->prev_dpll_state,
+ &d->input_idx, &d->ref_state, &d->eec_mode,
+ &d->phase_offset, &d->dpll_state);
+
+ dev_dbg(ice_pf_to_dev(pf),
+ "update dpll=%d, prev_src_idx:%u, src_idx:%u, state:%d, prev:%d mode:%d\n",
+ d->dpll_idx, d->prev_input_idx, d->input_idx,
+ d->dpll_state, d->prev_dpll_state, d->mode);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "update dpll=%d state failed, ret=%d %s\n",
+ d->dpll_idx, ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ if (init) {
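+ /* on init, cache the active input only if the dpll is already locked */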
+ if (d->dpll_state == DPLL_LOCK_STATUS_LOCKED ||
+ d->dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ)
+ d->active_input = pf->dplls.inputs[d->input_idx].pin;
+ p = &pf->dplls.inputs[d->input_idx];
+ return ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT, NULL);
+ }
+ if (d->dpll_state == DPLL_LOCK_STATUS_HOLDOVER ||
+ d->dpll_state == DPLL_LOCK_STATUS_UNLOCKED) {
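+ /* dpll is not tracking any input, invalidate the cached indexes */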
+ d->active_input = NULL;
+ if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID)
+ p = &pf->dplls.inputs[d->input_idx];
+ d->prev_input_idx = ICE_DPLL_PIN_IDX_INVALID;
+ d->input_idx = ICE_DPLL_PIN_IDX_INVALID;
+ if (!p)
+ return 0;
+ ret = ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT, NULL);
+ } else if (d->input_idx != d->prev_input_idx) {
+ if (d->prev_input_idx != ICE_DPLL_PIN_IDX_INVALID) {
+ p = &pf->dplls.inputs[d->prev_input_idx];
+ ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ NULL);
+ }
+ if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) {
+ p = &pf->dplls.inputs[d->input_idx];
+ d->active_input = p->pin;
+ ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ NULL);
+ }
+ d->prev_input_idx = d->input_idx;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_periodic_work - DPLLs periodic worker
+ * @work: pointer to kthread_work structure
+ *
+ * DPLLs periodic worker is responsible for polling the state of the dplls.
+ *
+ * Context: Holds pf->dplls.lock
+ */
+static void ice_dpll_periodic_work(struct kthread_work *work)
+{
+ struct ice_dplls *d = container_of(work, struct ice_dplls, work.work);
+ struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
+ struct ice_dpll *de = &pf->dplls.eec;
+ struct ice_dpll *dp = &pf->dplls.pps;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_update_state(pf, de, false);
+ if (!ret)
+ ret = ice_dpll_update_state(pf, dp, false);
+ if (ret) {
+ d->cgu_state_acq_err_num++;
+ /* stop rescheduling this worker */
+ if (d->cgu_state_acq_err_num >
+ ICE_CGU_STATE_ACQ_ERR_THRESHOLD) {
+ dev_err(ice_pf_to_dev(pf),
+ "EEC/PPS DPLLs periodic work disabled\n");
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+ ice_dpll_notify_changes(de);
+ ice_dpll_notify_changes(dp);
+
+ /* Run twice a second or reschedule if update failed */
+ kthread_queue_delayed_work(d->kworker, &d->work,
+ ret ? msecs_to_jiffies(10) :
+ msecs_to_jiffies(500));
+}
+
+/**
+ * ice_dpll_release_pins - release pins resources from dpll subsystem
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Release resources of given pins array in the dpll subsystem.
+ */
+static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dpll_pin_put(pins[i].pin);
+}
+
+/**
+ * ice_dpll_get_pins - get pins from dpll subsystem
+ * @pf: board private structure
+ * @pins: pointer to pins array
+ * @start_idx: get starts from this pin idx value
+ * @count: number of pins
+ * @clock_id: clock_id of dpll device
+ *
+ * Get (allocate) pins in the dpll subsystem and store them in the pin field
+ * of the given pins array.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - allocation failure reason
+ */
+static int
+ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx, int count, u64 clock_id)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE,
+ &pins[i].prop);
+ if (IS_ERR(pins[i].pin)) {
+ ret = PTR_ERR(pins[i].pin);
+ goto release_pins;
+ }
+ }
+
+ return 0;
+
+release_pins:
+ while (--i >= 0)
+ dpll_pin_put(pins[i].pin);
+ return ret;
+}
+
+/**
+ * ice_dpll_unregister_pins - unregister pins from a dpll
+ * @dpll: dpll device pointer
+ * @pins: pointer to pins array
+ * @ops: callback ops registered with the pins
+ * @count: number of pins
+ *
+ * Unregister pins of a given array from the given dpll device registered in
+ * the dpll subsystem.
+ */
+static void
+ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
+ const struct dpll_pin_ops *ops, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_register_pins - register pins with a dpll
+ * @dpll: dpll pointer to register pins with
+ * @pins: pointer to pins array
+ * @ops: callback ops registered with the pins
+ * @count: number of pins
+ *
+ * Register pins of a given array with given dpll in dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
+ const struct dpll_pin_ops *ops, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
+
+ return 0;
+
+unregister_pins:
+ while (--i >= 0)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_direct_pins - deinitialize direct pins
+ * @cgu: if cgu is present and controlled by this NIC
+ * @pins: pointer to pins array
+ * @count: number of pins
+ * @ops: callback ops registered with the pins
+ * @first: dpll device pointer
+ * @second: dpll device pointer
+ *
+ * If cgu is owned, unregister pins from the given dplls.
+ * Release pins resources to the dpll subsystem.
+ */
+static void
+ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count,
+ const struct dpll_pin_ops *ops,
+ struct dpll_device *first,
+ struct dpll_device *second)
+{
+ if (cgu) {
+ ice_dpll_unregister_pins(first, pins, ops, count);
+ ice_dpll_unregister_pins(second, pins, ops, count);
+ }
+ ice_dpll_release_pins(pins, count);
+}
+
+/**
+ * ice_dpll_init_direct_pins - initialize direct pins
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ * @pins: pointer to pins array
+ * @start_idx: index at which the allocation shall start in the dpll subsystem
+ * @count: number of pins
+ * @ops: callback ops registered with the pins
+ * @first: dpll device pointer
+ * @second: dpll device pointer
+ *
+ * Allocate directly connected pins of a given array in dpll subsystem.
+ * If cgu is owned, register the allocated pins with the given dplls.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_direct_pins(struct ice_pf *pf, bool cgu,
+ struct ice_dpll_pin *pins, int start_idx, int count,
+ const struct dpll_pin_ops *ops,
+ struct dpll_device *first, struct dpll_device *second)
+{
+ int ret;
+
+ ret = ice_dpll_get_pins(pf, pins, start_idx, count, pf->dplls.clock_id);
+ if (ret)
+ return ret;
+ if (cgu) {
+ ret = ice_dpll_register_pins(first, pins, ops, count);
+ if (ret)
+ goto release_pins;
+ ret = ice_dpll_register_pins(second, pins, ops, count);
+ if (ret)
+ goto unregister_first;
+ }
+
+ return 0;
+
+unregister_first:
+ ice_dpll_unregister_pins(first, pins, ops, count);
+release_pins:
+ ice_dpll_release_pins(pins, count);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_rclk_pin - release rclk pin resources
+ * @pf: board private structure
+ *
+ * Deregister rclk pin from parent pins and release resources in dpll subsystem.
+ */
+static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *rclk = &pf->dplls.rclk;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct dpll_pin *parent;
+ int i;
+
+ for (i = 0; i < rclk->num_parents; i++) {
+ parent = pf->dplls.inputs[rclk->parent_idx[i]].pin;
+ if (!parent)
+ continue;
+ dpll_pin_on_pin_unregister(parent, rclk->pin,
+ &ice_dpll_rclk_ops, rclk);
+ }
+ if (WARN_ON_ONCE(!vsi || !vsi->netdev))
+ return;
+ netdev_dpll_pin_clear(vsi->netdev);
+ dpll_pin_put(rclk->pin);
+}
+
+/**
+ * ice_dpll_init_rclk_pins - initialize recovered clock pin
+ * @pf: board private structure
+ * @pin: pin to register
+ * @start_idx: index at which the allocation shall start in the dpll subsystem
+ * @ops: callback ops registered with the pins
+ *
+ * Allocate a resource for the recovered clock pin in the dpll subsystem.
+ * Register the pin with the parent pins given in its info and with the pf's
+ * main vsi netdev.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ int start_idx, const struct dpll_pin_ops *ops)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct dpll_pin *parent;
+ int ret, i;
+
+ ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
+ pf->dplls.clock_id);
+ if (ret)
+ return ret;
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
+ parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin;
+ if (!parent) {
+ ret = -ENODEV;
+ goto unregister_pins;
+ }
+ ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin,
+ ops, &pf->dplls.rclk);
+ if (ret)
+ goto unregister_pins;
+ }
+ if (WARN_ON((!vsi || !vsi->netdev)))
+ return -EINVAL;
+ netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+
+ return 0;
+
+unregister_pins:
+ while (i) {
+ parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin;
+ dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin,
+ &ice_dpll_rclk_ops, &pf->dplls.rclk);
+ }
+ ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_pins - deinitialize direct pins
+ * @pf: board private structure
+ * @cgu: if cgu is controlled by this pf
+ *
+ * If cgu is owned, unregister the directly connected pins from the dplls.
+ * Release resources of directly connected pins from the dpll subsystem.
+ */
+static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
+{
+ struct ice_dpll_pin *outputs = pf->dplls.outputs;
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ int num_outputs = pf->dplls.num_outputs;
+ int num_inputs = pf->dplls.num_inputs;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll *de = &d->eec;
+ struct ice_dpll *dp = &d->pps;
+
+ ice_dpll_deinit_rclk_pin(pf);
+ if (cgu) {
+ ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops,
+ num_inputs);
+ ice_dpll_unregister_pins(de->dpll, inputs, &ice_dpll_input_ops,
+ num_inputs);
+ }
+ ice_dpll_release_pins(inputs, num_inputs);
+ if (cgu) {
+ ice_dpll_unregister_pins(dp->dpll, outputs,
+ &ice_dpll_output_ops, num_outputs);
+ ice_dpll_unregister_pins(de->dpll, outputs,
+ &ice_dpll_output_ops, num_outputs);
+ ice_dpll_release_pins(outputs, num_outputs);
+ }
+}
+
+/**
+ * ice_dpll_init_pins - init pins and register them with the dplls
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * Initialize the pf's directly connected pins within the pf's dplls in the
+ * Linux dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
+{
+ u32 rclk_idx;
+ int ret;
+
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
+ pf->dplls.num_inputs,
+ &ice_dpll_input_ops,
+ pf->dplls.eec.dpll, pf->dplls.pps.dpll);
+ if (ret)
+ return ret;
+ if (cgu) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
+ pf->dplls.num_inputs,
+ pf->dplls.num_outputs,
+ &ice_dpll_output_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_inputs;
+ }
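+ /* rclk pin indexes follow the input and output pins, one per PF */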
+ rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ &ice_dpll_rclk_ops);
+ if (ret)
+ goto deinit_outputs;
+
+ return 0;
+deinit_outputs:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
+ pf->dplls.num_outputs,
+ &ice_dpll_output_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+deinit_inputs:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs,
+ &ice_dpll_input_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_dpll - deinitialize dpll device
+ * @pf: board private structure
+ * @d: pointer to ice_dpll
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * If cgu is owned, unregister the dpll from the dpll subsystem.
+ * Release resources of dpll device from dpll subsystem.
+ */
+static void
+ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
+{
+ if (cgu)
+ dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_put(d->dpll);
+}
+
+/**
+ * ice_dpll_init_dpll - initialize dpll device in dpll subsystem
+ * @pf: board private structure
+ * @d: dpll to be initialized
+ * @cgu: if cgu is present and controlled by this NIC
+ * @type: type of dpll being initialized
+ *
+ * Allocate a dpll instance for this board in the dpll subsystem; if cgu is
+ * controlled by this NIC, register the dpll with the callback ops.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int
+ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
+ enum dpll_type type)
+{
+ u64 clock_id = pf->dplls.clock_id;
+ int ret;
+
+ d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE);
+ if (IS_ERR(d->dpll)) {
+ ret = PTR_ERR(d->dpll);
+ dev_err(ice_pf_to_dev(pf),
+ "dpll_device_get failed (%p) err=%d\n", d, ret);
+ return ret;
+ }
+ d->pf = pf;
+ if (cgu) {
+ ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ if (ret) {
+ dpll_device_put(d->dpll);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dpll_deinit_worker - deinitialize dpll kworker
+ * @pf: board private structure
+ *
+ * Stop dpll's kworker and release its resources.
+ */
+static void ice_dpll_deinit_worker(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+
+ kthread_cancel_delayed_work_sync(&d->work);
+ kthread_destroy_worker(d->kworker);
+}
+
+/**
+ * ice_dpll_init_worker - Initialize DPLLs periodic worker
+ * @pf: board private structure
+ *
+ * Create and start DPLLs periodic worker.
+ *
+ * Context: Shall be called after pf->dplls.lock is initialized.
+ * Return:
+ * * 0 - success
+ * * negative - create worker failure
+ */
+static int ice_dpll_init_worker(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct kthread_worker *kworker;
+
+ ice_dpll_update_state(pf, &d->eec, true);
+ ice_dpll_update_state(pf, &d->pps, true);
+ kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
+ kworker = kthread_create_worker(0, "ice-dplls-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+ d->kworker = kworker;
+ d->cgu_state_acq_err_num = 0;
+ kthread_queue_delayed_work(d->kworker, &d->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_init_info_direct_pins - initializes direct pins info
+ * @pf: board private structure
+ * @pin_type: type of pins being initialized
+ *
+ * Init information for directly connected pins, cache them in pf's pins
+ * structures.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int
+ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps;
+ int num_pins, i, ret = -EINVAL;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_dpll_pin *pins;
+ u8 freq_supp_num;
+ bool input;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ pins = pf->dplls.inputs;
+ num_pins = pf->dplls.num_inputs;
+ input = true;
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ pins = pf->dplls.outputs;
+ num_pins = pf->dplls.num_outputs;
+ input = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_pins; i++) {
+ pins[i].idx = i;
+ pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
+ pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
+ if (input) {
+ ret = ice_aq_get_cgu_ref_prio(hw, de->dpll_idx, i,
+ &de->input_prio[i]);
+ if (ret)
+ return ret;
+ ret = ice_aq_get_cgu_ref_prio(hw, dp->dpll_idx, i,
+ &dp->input_prio[i]);
+ if (ret)
+ return ret;
+ pins[i].prop.capabilities |=
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
+ pins[i].prop.phase_range.min =
+ pf->dplls.input_phase_adj_max;
+ pins[i].prop.phase_range.max =
+ -pf->dplls.input_phase_adj_max;
+ } else {
+ pins[i].prop.phase_range.min =
+ pf->dplls.output_phase_adj_max;
+ pins[i].prop.phase_range.max =
+ -pf->dplls.output_phase_adj_max;
+ }
+ pins[i].prop.capabilities |=
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+ if (ret)
+ return ret;
+ pins[i].prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(hw, i, input, &freq_supp_num);
+ pins[i].prop.freq_supported_num = freq_supp_num;
+ pins[i].pf = pf;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_info_rclk_pin - initializes rclk pin information
+ * @pf: board private structure
+ *
+ * Init information for rclk pin, cache them in pf->dplls.rclk.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *pin = &pf->dplls.rclk;
+
+ pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ pin->pf = pf;
+
+ return ice_dpll_pin_state_update(pf, pin,
+ ICE_DPLL_PIN_TYPE_RCLK_INPUT, NULL);
+}
+
+/**
+ * ice_dpll_init_pins_info - init pins info wrapper
+ * @pf: board private structure
+ * @pin_type: type of pins being initialized
+ *
+ * Wraps functions for pin initialization.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int
+ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
+{
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ return ice_dpll_init_info_direct_pins(pf, pin_type);
+ case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
+ return ice_dpll_init_info_rclk_pin(pf);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_dpll_deinit_info - release memory allocated for pins info
+ * @pf: board private structure
+ *
+ * Release memory allocated for pins by ice_dpll_init_info function.
+ */
+static void ice_dpll_deinit_info(struct ice_pf *pf)
+{
+ kfree(pf->dplls.inputs);
+ kfree(pf->dplls.outputs);
+ kfree(pf->dplls.eec.input_prio);
+ kfree(pf->dplls.pps.input_prio);
+}
+
+/**
+ * ice_dpll_init_info - prepare pf's dpll information structure
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * Acquire (from HW) and set basic dpll information (on pf->dplls struct).
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
+{
+ struct ice_aqc_get_cgu_abilities abilities;
+ struct ice_dpll *de = &pf->dplls.eec;
+ struct ice_dpll *dp = &pf->dplls.pps;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_hw *hw = &pf->hw;
+ int ret, alloc_size, i;
+
+ d->clock_id = ice_generate_clock_id(pf);
+ ret = ice_aq_get_cgu_abilities(hw, &abilities);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "err:%d %s failed to read cgu abilities\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ return ret;
+ }
+
+ de->dpll_idx = abilities.eec_dpll_idx;
+ dp->dpll_idx = abilities.pps_dpll_idx;
+ d->num_inputs = abilities.num_inputs;
+ d->num_outputs = abilities.num_outputs;
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+
+ alloc_size = sizeof(*d->inputs) * d->num_inputs;
+ d->inputs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!d->inputs)
+ return -ENOMEM;
+
+ alloc_size = sizeof(*de->input_prio) * d->num_inputs;
+ de->input_prio = kzalloc(alloc_size, GFP_KERNEL);
+ if (!de->input_prio)
+ return -ENOMEM;
+
+ dp->input_prio = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dp->input_prio)
+ return -ENOMEM;
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_INPUT);
+ if (ret)
+ goto deinit_info;
+
+ if (cgu) {
+ alloc_size = sizeof(*d->outputs) * d->num_outputs;
+ d->outputs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!d->outputs) {
+ ret = -ENOMEM;
+ goto deinit_info;
+ }
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
+ if (ret)
+ goto deinit_info;
+ }
+
+ ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
+ &pf->dplls.rclk.num_parents);
+ if (ret)
+ return ret;
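+ /* rclk parent pins are consecutive input pins starting at base_rclk_idx */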
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT);
+ if (ret)
+ return ret;
+ de->mode = DPLL_MODE_AUTOMATIC;
+ dp->mode = DPLL_MODE_AUTOMATIC;
+
+ dev_dbg(ice_pf_to_dev(pf),
+ "%s - success, inputs:%u, outputs:%u rclk-parents:%u\n",
+ __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents);
+
+ return 0;
+
+deinit_info:
+ dev_err(ice_pf_to_dev(pf),
+ "%s - fail: d->inputs:%p, de->input_prio:%p, dp->input_prio:%p, d->outputs:%p\n",
+ __func__, d->inputs, de->input_prio,
+ dp->input_prio, d->outputs);
+ ice_dpll_deinit_info(pf);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit - Disable the driver/HW support for the dpll subsystem and
+ * the dpll device.
+ * @pf: board private structure
+ *
+ * Handles the cleanup work required after dpll initialization: unregisters
+ * the dplls and pins from the dpll subsystem and frees all resources used
+ * for handling them.
+ *
+ * Context: Destroys pf->dplls.lock mutex. Call only if ICE_FLAG_DPLL was set.
+ */
+void ice_dpll_deinit(struct ice_pf *pf)
+{
+ bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
+
+ clear_bit(ICE_FLAG_DPLL, pf->flags);
+ if (cgu)
+ ice_dpll_deinit_worker(pf);
+
+ ice_dpll_deinit_pins(pf, cgu);
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+ ice_dpll_deinit_info(pf);
+ mutex_destroy(&pf->dplls.lock);
+}
+
+/**
+ * ice_dpll_init - initialize support for dpll subsystem
+ * @pf: board private structure
+ *
+ * Set up the device dplls, registering them and the connected pins within the
+ * Linux dpll subsystem. Allows userspace to obtain the DPLL state and to issue
+ * DPLL configuration requests.
+ *
+ * Context: Initializes pf->dplls.lock mutex.
+ */
+void ice_dpll_init(struct ice_pf *pf)
+{
+ bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
+ struct ice_dplls *d = &pf->dplls;
+ int err = 0;
+
+ err = ice_dpll_init_info(pf, cgu);
+ if (err)
+ goto err_exit;
+ err = ice_dpll_init_dpll(pf, &pf->dplls.eec, cgu, DPLL_TYPE_EEC);
+ if (err)
+ goto deinit_info;
+ err = ice_dpll_init_dpll(pf, &pf->dplls.pps, cgu, DPLL_TYPE_PPS);
+ if (err)
+ goto deinit_eec;
+ err = ice_dpll_init_pins(pf, cgu);
+ if (err)
+ goto deinit_pps;
+ mutex_init(&d->lock);
+ if (cgu) {
+ err = ice_dpll_init_worker(pf);
+ if (err)
+ goto deinit_pins;
+ }
+ set_bit(ICE_FLAG_DPLL, pf->flags);
+
+ return;
+
+deinit_pins:
+ ice_dpll_deinit_pins(pf, cgu);
+deinit_pps:
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+deinit_eec:
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+deinit_info:
+ ice_dpll_deinit_info(pf);
+err_exit:
+ mutex_destroy(&d->lock);
+ dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
new file mode 100644
index 000000000000..bb32b6d88373
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_DPLL_H_
+#define _ICE_DPLL_H_
+
+#include "ice.h"
+
+#define ICE_DPLL_PRIO_MAX 0xF
+#define ICE_DPLL_RCLK_NUM_MAX 4
+
+/** ice_dpll_pin - store info about pins
+ * @pin: dpll pin structure
+ * @pf: pointer to pf, which has registered the dpll_pin
+ * @idx: ice pin private idx
+ * @num_parents: holds number of parent pins
+ * @parent_idx: holds indexes of parent pins
+ * @flags: pin flags returned from HW
+ * @state: state of a pin
+ * @prop: pin properties
+ * @freq: current frequency of a pin
+ * @phase_adjust: current phase adjust value
+ */
+struct ice_dpll_pin {
+ struct dpll_pin *pin;
+ struct ice_pf *pf;
+ u8 idx;
+ u8 num_parents;
+ u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX];
+ u8 flags[ICE_DPLL_RCLK_NUM_MAX];
+ u8 state[ICE_DPLL_RCLK_NUM_MAX];
+ struct dpll_pin_properties prop;
+ u32 freq;
+ s32 phase_adjust;
+};
+
+/** ice_dpll - store info required for DPLL control
+ * @dpll: pointer to dpll dev
+ * @pf: pointer to pf, which has registered the dpll_device
+ * @dpll_idx: index of dpll on the NIC
+ * @input_idx: currently selected input index
+ * @prev_input_idx: previously selected input index
+ * @ref_state: state of dpll reference signals
+ * @eec_mode: eec_mode dpll is configured for
+ * @phase_offset: phase offset of active pin vs dpll signal
+ * @prev_phase_offset: previous phase offset of active pin vs dpll signal
+ * @input_prio: priorities of each input
+ * @dpll_state: current dpll sync state
+ * @prev_dpll_state: last dpll sync state
+ * @active_input: pointer to active input pin
+ * @prev_input: pointer to previous active input pin
+ */
+struct ice_dpll {
+ struct dpll_device *dpll;
+ struct ice_pf *pf;
+ u8 dpll_idx;
+ u8 input_idx;
+ u8 prev_input_idx;
+ u8 ref_state;
+ u8 eec_mode;
+ s64 phase_offset;
+ s64 prev_phase_offset;
+ u8 *input_prio;
+ enum dpll_lock_status dpll_state;
+ enum dpll_lock_status prev_dpll_state;
+ enum dpll_mode mode;
+ struct dpll_pin *active_input;
+ struct dpll_pin *prev_input;
+};
+
+/** ice_dplls - store info required for CCU (clock controlling unit)
+ * @kworker: periodic worker
+ * @work: periodic work
+ * @lock: locks access to configuration of a dpll
+ * @eec: pointer to EEC dpll dev
+ * @pps: pointer to PPS dpll dev
+ * @inputs: input pins pointer
+ * @outputs: output pins pointer
+ * @rclk: recovered pins pointer
+ * @num_inputs: number of input pins available on dpll
+ * @num_outputs: number of output pins available on dpll
+ * @cgu_state_acq_err_num: number of errors returned during periodic work
+ * @base_rclk_idx: idx of the first pin used for clock recovery pins
+ * @clock_id: clock_id of dplls
+ * @input_phase_adj_max: max phase adjust value for the input pins
+ * @output_phase_adj_max: max phase adjust value for the output pins
+ */
+struct ice_dplls {
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work work;
+ struct mutex lock;
+ struct ice_dpll eec;
+ struct ice_dpll pps;
+ struct ice_dpll_pin *inputs;
+ struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin rclk;
+ u8 num_inputs;
+ u8 num_outputs;
+ int cgu_state_acq_err_num;
+ u8 base_rclk_idx;
+ u64 clock_id;
+ s32 input_phase_adj_max;
+ s32 output_phase_adj_max;
+};
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void ice_dpll_init(struct ice_pf *pf);
+void ice_dpll_deinit(struct ice_pf *pf);
+#else
+static inline void ice_dpll_init(struct ice_pf *pf) { }
+static inline void ice_dpll_deinit(struct ice_pf *pf) { }
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index 67bfd1f61cdd..6ae0269bdf73 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -73,7 +73,7 @@ ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
rule_info->sw_act.vsi_handle = vf_vsi_idx;
rule_info->sw_act.flag |= ICE_FLTR_RX;
rule_info->sw_act.src = pf_id;
- rule_info->priority = 5;
+ rule_info->priority = 2;
}
static void
@@ -84,7 +84,7 @@ ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
rule_info->sw_act.flag |= ICE_FLTR_TX;
rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
rule_info->flags_info.act_valid = true;
- rule_info->priority = 5;
+ rule_info->priority = 2;
}
static int
@@ -207,7 +207,7 @@ ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
rule_info.allow_pass_l2 = true;
rule_info.sw_act.vsi_handle = vsi_idx;
rule_info.sw_act.fltr_act = ICE_NOP;
- rule_info.priority = 5;
+ rule_info.priority = 2;
err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
if (err)
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index ad4d4702129f..a34083567e6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -345,6 +345,88 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
+static const u32 ice_adv_lnk_speed_100[] __initconst = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_1000[] __initconst = {
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_2500[] __initconst = {
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_5000[] __initconst = {
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_10000[] __initconst = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_25000[] __initconst = {
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_40000[] __initconst = {
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_50000[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_100000[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_200000[] __initconst = {
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+};
+
+static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
+};
+
+void __init ice_adv_lnk_speed_maps_init(void)
+{
+ ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
+ ARRAY_SIZE(ice_adv_lnk_speed_maps));
+}
+
static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
struct ice_vsi *vsi)
@@ -1060,7 +1142,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
ice_gstrings_vsi_stats[i].stat_string);
if (ice_is_port_repr_netdev(netdev))
@@ -1080,7 +1162,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
@@ -1097,7 +1179,8 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
- ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name);
+ ethtool_sprintf(&p, "%s",
+ ice_gstrings_priv_flags[i].name);
break;
default:
break;
@@ -1638,6 +1721,15 @@ ice_get_ethtool_stats(struct net_device *netdev,
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
ICE_PHY_TYPE_HIGH_100G_AUI2)
+#define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_SR4 | \
+ ICE_PHY_TYPE_HIGH_200G_FR4 | \
+ ICE_PHY_TYPE_HIGH_200G_LR4 | \
+ ICE_PHY_TYPE_HIGH_200G_DR4 | \
+ ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4)
+
/**
* ice_mask_min_supported_speeds
* @hw: pointer to the HW structure
@@ -1652,8 +1744,9 @@ ice_mask_min_supported_speeds(struct ice_hw *hw,
u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
- if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
- phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
+ if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) ||
+ (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) ||
+ (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
@@ -1796,6 +1889,9 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ice_phy_type_to_ethtool(netdev, ks);
switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ ks->base.speed = SPEED_200000;
+ break;
case ICE_AQ_LINK_SPEED_100GB:
ks->base.speed = SPEED_100000;
break;
@@ -2008,79 +2104,69 @@ done:
}
/**
+ * ice_speed_to_aq_link - Get AQ link speed by Ethtool forced speed
+ * @speed: ethtool forced speed
+ */
+static u16 ice_speed_to_aq_link(int speed)
+{
+ int aq_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ aq_speed = ICE_AQ_LINK_SPEED_10MB;
+ break;
+ case SPEED_100:
+ aq_speed = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case SPEED_1000:
+ aq_speed = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case SPEED_2500:
+ aq_speed = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case SPEED_5000:
+ aq_speed = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case SPEED_10000:
+ aq_speed = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case SPEED_20000:
+ aq_speed = ICE_AQ_LINK_SPEED_20GB;
+ break;
+ case SPEED_25000:
+ aq_speed = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case SPEED_40000:
+ aq_speed = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ case SPEED_50000:
+ aq_speed = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case SPEED_100000:
+ aq_speed = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ default:
+ aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ return aq_speed;
+}
+
+/**
* ice_ksettings_find_adv_link_speed - Find advertising link speed
* @ks: ethtool ksettings
*/
static u16
ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
{
+ const struct ethtool_forced_speed_map *map;
u16 adv_link_speed = 0;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseX_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseT_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseKX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseT_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 5000baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseT_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseKR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseSR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseLR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 25000baseCR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 25000baseSR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 25000baseKR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseCR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseSR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseLR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseKR4_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseCR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseKR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseSR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseCR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseSR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseLR4_ER4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseCR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseSR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
+ for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
+ map = ice_adv_lnk_speed_maps + i;
+ if (linkmode_intersects(ks->link_modes.advertising, map->caps))
+ adv_link_speed |= ice_speed_to_aq_link(map->speed);
+ }
return adv_link_speed;
}
@@ -3285,7 +3371,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = ice_get_ptp_clock_index(pf);
+ info->phc_index = ice_ptp_clock_index(pf);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b403ee79cd5e..b88e3da06f13 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -100,6 +100,14 @@ phy_type_high_lkup[] = {
[2] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
[3] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full),
[4] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
+ [5] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full),
+ [6] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full),
+ [7] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full),
+ [8] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full),
+ [9] = ICE_PHY_TYPE(200GB, 200000baseDR4_Full),
+ [10] = ICE_PHY_TYPE(200GB, 200000baseKR4_Full),
+ [11] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full),
+ [12] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full),
};
#endif /* !_ICE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 8c6e13f87b7d..d151e5bacfec 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018-2020, Intel Corporation. */
+/* Copyright (C) 2018-2023, Intel Corporation. */
/* flow director ethtool support for ice */
@@ -540,16 +540,24 @@ static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
/* total guaranteed filters assigned to this VSI */
num_guar = vsi->num_gfltr;
- /* minus the guaranteed filters programed by this VSI */
- num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
- VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
-
/* total global best effort filters */
num_be = hw->func_caps.fd_fltr_best_effort;
- /* minus the global best effort filters programmed */
- num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
- GLQF_FD_CNT_FD_BCNT_S;
+ /* Subtract the number of programmed filters from the global values */
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
+ rd32(hw, VSIQF_FD_CNT(vsi_num)));
+ num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
+ rd32(hw, GLQF_FD_CNT));
+ break;
+ case ICE_MAC_E810:
+ default:
+ num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
+ rd32(hw, VSIQF_FD_CNT(vsi_num)));
+ num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
+ rd32(hw, GLQF_FD_CNT));
+ }
return num_guar + num_be;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 85cca572c22a..fb8b925aaf8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1318,7 +1318,6 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
list_del(&entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), entry->entry);
devm_kfree(ice_hw_to_dev(hw), entry);
return 0;
@@ -1645,10 +1644,8 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
*entry_h = ICE_FLOW_ENTRY_HNDL(e);
out:
- if (status && e) {
- devm_kfree(ice_hw_to_dev(hw), e->entry);
+ if (status)
devm_kfree(ice_hw_to_dev(hw), e);
- }
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index b465d27d9b80..96923ef0a5a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -350,11 +350,8 @@ struct ice_flow_entry {
u64 id;
struct ice_flow_prof *prof;
- /* Flow entry's content */
- void *entry;
enum ice_flow_priority priority;
u16 vsi_handle;
- u16 entry_sz;
};
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e)
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 75c9de675f20..c8ea1af51ad3 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -389,6 +389,9 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw)
if (!hw->func_caps.ts_func_info.src_tmr_owned)
return false;
+ if (!ice_is_gps_in_netlist(hw))
+ return false;
+
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
if (ice_is_e810t(hw)) {
int err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 531cc2194741..86936b758ade 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
/* Machine-generated file */
@@ -231,6 +231,7 @@
#define PFINT_SB_CTL 0x0016B600
#define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_TSYN_MSK 0x0016C980
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
@@ -284,11 +285,11 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32))
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32))
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT(_i) (0x001E36E0 + ((_i) * 32))
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX 8
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M GENMASK(15, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR(_i) (0x001E3800 + ((_i) * 32))
+#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M GENMASK(15, 0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
@@ -311,7 +312,11 @@
#define GL_MDET_TX_PQM_MAL_TYPE_S 26
#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26)
#define GL_MDET_TX_PQM_VALID_M BIT(31)
-#define GL_MDET_TX_TCLAN 0x000FC068
+#define GL_MDET_TX_TCLAN_BY_MAC(hw) \
+ ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MDET_TX_TCLAN : \
+ E800_GL_MDET_TX_TCLAN)
+#define E800_GL_MDET_TX_TCLAN 0x000FC068
+#define E830_GL_MDET_TX_TCLAN 0x000FCCC0
#define GL_MDET_TX_TCLAN_QNUM_S 0
#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0)
#define GL_MDET_TX_TCLAN_VF_NUM_S 15
@@ -325,7 +330,11 @@
#define PF_MDET_RX_VALID_M BIT(0)
#define PF_MDET_TX_PQM 0x002D2C80
#define PF_MDET_TX_PQM_VALID_M BIT(0)
-#define PF_MDET_TX_TCLAN 0x000FC000
+#define PF_MDET_TX_TCLAN_BY_MAC(hw) \
+ ((hw)->mac_type == ICE_MAC_E830 ? E830_PF_MDET_TX_TCLAN : \
+ E800_PF_MDET_TX_TCLAN)
+#define E800_PF_MDET_TX_TCLAN 0x000FC000
+#define E830_PF_MDET_TX_TCLAN 0x000FCC00
#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
#define VP_MDET_RX_VALID_M BIT(0)
@@ -335,6 +344,8 @@
#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
#define VP_MDET_TX_TDPU_VALID_M BIT(0)
+#define E800_GL_MNG_FWSM_FW_MODES_M GENMASK(2, 0)
+#define E830_GL_MNG_FWSM_FW_MODES_M GENMASK(1, 0)
#define GL_MNG_FWSM 0x000B6134
#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GLNVM_FLA 0x000B6108
@@ -363,13 +374,18 @@
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
#define GLQF_FD_CNT 0x00460018
+#define E800_GLQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
+#define E830_GLQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define GLQF_FD_CNT_FD_BCNT_S 16
-#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16)
+#define E800_GLQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
+#define E830_GLQF_FD_CNT_FD_BCNT_M GENMASK(31, 16)
#define GLQF_FD_SIZE 0x00460010
#define GLQF_FD_SIZE_FD_GSIZE_S 0
-#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0)
+#define E800_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(14, 0)
+#define E830_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(15, 0)
#define GLQF_FD_SIZE_FD_BSIZE_S 16
-#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
+#define E800_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(30, 16)
+#define E830_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(31, 16)
#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4))
#define GLQF_FDMASK_MAX_INDEX 31
@@ -388,6 +404,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
+#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
+#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
+#define E830_PFQF_FD_CNT_FD_BCNT_M GENMASK(31, 16)
#define PFQF_FD_ENA 0x0043A000
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100
@@ -478,6 +498,7 @@
#define GLTSYN_SYNC_DLAY 0x00088818
#define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4))
#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
+#define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4))
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
@@ -486,9 +507,11 @@
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
-#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
+#define E800_VSIQF_FD_CNT_FD_GCNT_M GENMASK(13, 0)
+#define E830_VSIQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define VSIQF_FD_CNT_FD_BCNT_S 16
-#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
+#define E800_VSIQF_FD_CNT_FD_BCNT_M GENMASK(29, 16)
+#define E830_VSIQF_FD_CNT_FD_BCNT_M GENMASK(31, 16)
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
#define PFPM_APM 0x000B8080
@@ -500,6 +523,10 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
+#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
+#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
+#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 7b1256992dcf..b980f89dc892 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -19,8 +19,11 @@ static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ 0, 0, 0, 0, 0, 0, 0x30 };
+static const u8 ice_lport_rcp[ICE_RECIPE_LEN] = {
+ 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x85, 0, 0x16, 0, 0, 0, 0xff, 0xff, 0x07, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0x30 };
/**
* ice_lag_set_primary - set PF LAG state as Primary
@@ -173,18 +176,22 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
}
/**
- * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG
+ * ice_lag_cfg_fltr - Add/Remove rule for LAG
* @lag: lag struct for local interface
+ * @act: rule action
+ * @recipe_id: recipe id for the new rule
+ * @rule_idx: pointer to rule index
* @add: boolean on whether we are adding filters
*/
static int
-ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
+ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
+ bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 s_rule_sz, vsi_num;
struct ice_hw *hw;
- u32 act, opc;
u8 *eth_hdr;
+ u32 opc;
int err;
hw = &lag->pf->hw;
@@ -193,7 +200,7 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
s_rule = kzalloc(s_rule_sz, GFP_KERNEL);
if (!s_rule) {
- dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG default VSI\n");
+ dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG\n");
return -ENOMEM;
}
@@ -201,19 +208,17 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
eth_hdr = s_rule->hdr_data;
ice_fill_eth_hdr(eth_hdr);
- act = (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
+ act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
ICE_SINGLE_ACT_VSI_ID_M;
- act |= ICE_SINGLE_ACT_VSI_FORWARDING |
- ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->recipe_id = cpu_to_le16(lag->pf_recipe);
+ s_rule->recipe_id = cpu_to_le16(recipe_id);
s_rule->src = cpu_to_le16(hw->port_info->lport);
s_rule->act = cpu_to_le32(act);
s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
opc = ice_aqc_opc_add_sw_rules;
} else {
- s_rule->index = cpu_to_le16(lag->pf_rule_id);
+ s_rule->index = cpu_to_le16(*rule_idx);
opc = ice_aqc_opc_remove_sw_rules;
}
@@ -222,9 +227,9 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
goto dflt_fltr_free;
if (add)
- lag->pf_rule_id = le16_to_cpu(s_rule->index);
+ *rule_idx = le16_to_cpu(s_rule->index);
else
- lag->pf_rule_id = 0;
+ *rule_idx = 0;
dflt_fltr_free:
kfree(s_rule);
@@ -232,6 +237,37 @@ dflt_fltr_free:
}
/**
+ * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG
+ * @lag: lag struct for local interface
+ * @add: boolean on whether to add filter
+ */
+static int
+ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
+{
+ u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+
+ return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
+ &lag->pf_rule_id, add);
+}
+
+/**
+ * ice_lag_cfg_drop_fltr - Add/Remove lport drop rule
+ * @lag: lag struct for local interface
+ * @add: boolean on whether to add filter
+ */
+static int
+ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
+{
+ u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT |
+ ICE_SINGLE_ACT_DROP;
+
+ return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
+ &lag->lport_rule_idx, add);
+}
+
+/**
* ice_lag_cfg_pf_fltrs - set filters up for new active port
* @lag: local interfaces lag struct
* @ptr: opaque data containing notifier event
@@ -257,13 +293,18 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
if (bonding_info->slave.state && lag->pf_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, false))
dev_err(dev, "Error removing old default VSI filter\n");
+ if (ice_lag_cfg_drop_fltr(lag, true))
+ dev_err(dev, "Error adding new drop filter\n");
return;
}
/* interface becoming active - add new default VSI rule */
- if (!bonding_info->slave.state && !lag->pf_rule_id)
+ if (!bonding_info->slave.state && !lag->pf_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(dev, "Error adding new default VSI filter\n");
+ if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
+ dev_err(dev, "Error removing old drop filter\n");
+ }
}
/**
@@ -430,10 +471,11 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
struct ice_hw *new_hw = NULL;
__le32 teid, parent_teid;
@@ -505,26 +547,17 @@ qbuf_none:
goto resume_traffic;
/* Move Vf's VSI node for this TC to newport's scheduler tree */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc memory for VF node failover\n");
- goto resume_traffic;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for failover\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_traffic;
qbuf_err:
@@ -755,10 +788,11 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
@@ -820,26 +854,17 @@ reclaim_none:
goto resume_reclaim;
/* Move node to new parent */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc memory for VF node failover\n");
- goto resume_reclaim;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_reclaim;
reclaim_qerr:
@@ -1195,6 +1220,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
+ ice_lag_cfg_drop_fltr(lag, true);
}
/* add filter for primary control packets */
ice_lag_cfg_cp_fltr(lag, true);
@@ -1792,10 +1818,11 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- u16 numq, valq, buf_size, num_moved, qbuf_size;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
- struct ice_aqc_move_elem *buf;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
@@ -1853,26 +1880,17 @@ sync_none:
goto resume_sync;
/* Move node to new parent */
- buf_size = struct_size(buf, teid, 1);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
- goto resume_sync;
- }
-
buf->hdr.src_parent_teid = parent_teid;
buf->hdr.dest_parent_teid = n_prt->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
buf->teid[0] = teid;
- if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
- NULL))
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
else
ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
- kfree(buf);
goto resume_sync;
sync_qerr:
@@ -1953,11 +1971,16 @@ int ice_init_lag(struct ice_pf *pf)
goto lag_error;
}
- err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe, ice_dflt_vsi_rcp,
- 1);
+ err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe,
+ ice_dflt_vsi_rcp, 1);
if (err)
goto lag_error;
+ err = ice_create_lag_recipe(&pf->hw, &lag->lport_recipe,
+ ice_lport_rcp, 3);
+ if (err)
+ goto free_rcp_res;
+
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
@@ -1966,7 +1989,8 @@ int ice_init_lag(struct ice_pf *pf)
continue;
if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
- recipe_bits |= BIT(lag->pf_recipe);
+ recipe_bits |= BIT(lag->pf_recipe) |
+ BIT(lag->lport_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
(u8 *)&recipe_bits, NULL);
}
@@ -1977,6 +2001,9 @@ int ice_init_lag(struct ice_pf *pf)
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_rcp_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &pf->lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -2006,6 +2033,8 @@ void ice_deinit_lag(struct ice_pf *pf)
ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
&pf->lag->pf_recipe);
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &pf->lag->lport_recipe);
kfree(lag);
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index facb6c894b6d..9557e8605a07 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -39,8 +39,10 @@ struct ice_lag {
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
u16 pf_recipe;
+ u16 lport_recipe;
u16 pf_rule_id;
u16 cp_rule_idx;
+ u16 lport_rule_idx;
u8 role;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 73bbf06a76db..4b1e56396293 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -229,7 +229,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
@@ -1831,21 +1831,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- int err;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
- qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
- kfree(qg_buf);
- return err;
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}
/**
@@ -1887,24 +1880,18 @@ setup_rings:
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- u16 q_idx = 0;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
-
- qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
+ u16 q_idx;
qg_buf->num_txqs = 1;
for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
- goto err_cfg_txqs;
+ break;
}
-err_cfg_txqs:
- kfree(qg_buf);
return err;
}
@@ -3990,13 +3977,21 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
- if (ice_is_e810t(&pf->hw)) {
+ if (ice_is_phy_rclk_in_netlist(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_PHY_RCLK);
+ /* If we don't own the timer - don't enable other caps */
+ if (!ice_pf_src_tmr_owned(pf))
+ break;
+ if (ice_is_cgu_in_netlist(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_CGU);
+ if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
- ice_set_feature_support(pf, ICE_F_GNSS);
- }
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 7784135160fd..6607fa6fe556 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
/* Intel(R) Ethernet Connection E800 Series Linux Driver */
@@ -1759,7 +1759,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
- reg = rd32(hw, GL_MDET_TX_TCLAN);
+ reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & GL_MDET_TX_TCLAN_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
GL_MDET_TX_TCLAN_PF_NUM_S;
@@ -1773,7 +1773,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
- wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+ wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
reg = rd32(hw, GL_MDET_RX);
@@ -1801,9 +1801,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
}
- reg = rd32(hw, PF_MDET_TX_TCLAN);
+ reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
}
@@ -3150,7 +3150,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing)
+ if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
}
@@ -3160,7 +3160,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
- if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ if (ice_pf_src_tmr_owned(pf)) {
/* Save EVENTs from GLTSYN register */
pf->ptp.ext_ts_irq |= gltsyn_stat &
(GLTSYN_STAT_EVENT0_M |
@@ -3871,7 +3871,8 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588)
+ if (func_caps->common_cap.ieee_1588 &&
+ !(pf->hw.mac_type == ICE_MAC_E830))
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -4666,6 +4667,10 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_CGU) ||
+ ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
+ ice_dpll_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4695,6 +4700,8 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_gnss_exit(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (test_bit(ICE_FLAG_DPLL, pf->flags))
+ ice_dpll_deinit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -5535,7 +5542,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
- ice_restore_all_vfs_msi_state(pdev);
+ ice_restore_all_vfs_msi_state(pf);
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
@@ -5578,34 +5585,38 @@ static void ice_pci_err_reset_done(struct pci_dev *pdev)
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
/* required last entry */
- { 0, }
+ {}
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
@@ -5629,6 +5640,8 @@ static struct pci_driver ice_driver = {
#endif /* CONFIG_PM */
.shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
+ .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
+ .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
.err_handler = &ice_pci_err_handler
};
@@ -5645,6 +5658,8 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
+ ice_adv_lnk_speed_maps_init();
+
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
@@ -7387,8 +7402,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
}
/* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_cfg_timestamp(pf, false);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) {
+ if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
+ ice_ptp_cfg_timestamp(pf, false);
+ else if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL)
+ /* for E82x, the PHC owner always needs to have interrupts */
+ ice_ptp_cfg_timestamp(pf, true);
+ }
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 81d96a40d5a7..1eddcbe89b0c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -39,8 +39,8 @@ ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
/* initialize with defaults */
for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
- snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
- "%s", ice_pin_desc_e810t[i].name);
+ strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
+ sizeof(ptp_pins[i].name));
ptp_pins[i].index = ice_pin_desc_e810t[i].index;
ptp_pins[i].func = ice_pin_desc_e810t[i].func;
ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
@@ -256,6 +256,24 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
+ * ice_ptp_configure_tx_tstamp - Enable or disable Tx timestamp interrupt
+ * @pf: The PF pointer to search in
+ * @on: bool value for whether timestamp interrupt is enabled or disabled
+ */
+static void ice_ptp_configure_tx_tstamp(struct ice_pf *pf, bool on)
+{
+ u32 val;
+
+ /* Configure the Tx timestamp interrupt */
+ val = rd32(&pf->hw, PFINT_OICR_ENA);
+ if (on)
+ val |= PFINT_OICR_TSYN_TX_M;
+ else
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(&pf->hw, PFINT_OICR_ENA, val);
+}
+
+/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
* @on: bool value for whether timestamps are enabled or disabled
@@ -263,7 +281,6 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
{
struct ice_vsi *vsi;
- u32 val;
u16 i;
vsi = ice_get_main_vsi(pf);
@@ -277,13 +294,8 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
vsi->tx_rings[i]->ptp_tx = on;
}
- /* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
- val |= PFINT_OICR_TSYN_TX_M;
- else
- val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
+ if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_SELF)
+ ice_ptp_configure_tx_tstamp(pf, on);
pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
}
@@ -328,131 +340,6 @@ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
}
/**
- * ice_get_ptp_clock_index - Get the PTP clock index
- * @pf: the PF pointer
- *
- * Determine the clock index of the PTP clock associated with this device. If
- * this is the PF controlling the clock, just use the local access to the
- * clock device pointer.
- *
- * Otherwise, read from the driver shared parameters to determine the clock
- * index value.
- *
- * Returns: the index of the PTP clock associated with this device, or -1 if
- * there is no associated clock.
- */
-int ice_get_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- u32 value;
- int err;
-
- /* Use the ptp_clock structure if we're the main PF */
- if (pf->ptp.clock)
- return ptp_clock_index(pf->ptp.clock);
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
- if (err) {
- dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- return -1;
- }
-
- /* The PTP clock index is an integer, and will be between 0 and
- * INT_MAX. The highest bit of the driver shared parameter is used to
- * indicate whether or not the currently stored clock index is valid.
- */
- if (!(value & PTP_SHARED_CLK_IDX_VALID))
- return -1;
-
- return value & ~PTP_SHARED_CLK_IDX_VALID;
-}
-
-/**
- * ice_set_ptp_clock_index - Set the PTP clock index
- * @pf: the PF pointer
- *
- * Set the PTP clock index for this device into the shared driver parameters,
- * so that other PFs associated with this device can read it.
- *
- * If the PF is unable to store the clock index, it will log an error, but
- * will continue operating PTP.
- */
-static void ice_set_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- u32 value;
- int err;
-
- if (!pf->ptp.clock)
- return;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- value = (u32)ptp_clock_index(pf->ptp.clock);
- if (value > INT_MAX) {
- dev_err(dev, "PTP Clock index is too large to store\n");
- return;
- }
- value |= PTP_SHARED_CLK_IDX_VALID;
-
- err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
- if (err) {
- dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- }
-}
-
-/**
- * ice_clear_ptp_clock_index - Clear the PTP clock index
- * @pf: the PF pointer
- *
- * Clear the PTP clock index for this device. Must be called when
- * unregistering the PTP clock, in order to ensure other PFs stop reporting
- * a clock object that no longer exists.
- */
-static void ice_clear_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- int err;
-
- /* Do not clear the index if we don't own the timer */
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
- if (err) {
- dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- }
-}
-
-/**
* ice_ptp_read_src_clk_reg - Read the source clock register
* @pf: Board private structure
* @sts: Optional parameter for holding a pair of system timestamps from
@@ -674,9 +561,6 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
int err;
u8 idx;
- if (!tx->init)
- return;
-
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
hw = &pf->hw;
@@ -775,6 +659,39 @@ skip_ts_read:
}
/**
+ * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
+ * @pf: Board private structure
+ */
+static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+ unsigned int i;
+
+ mutex_lock(&pf->ptp.ports_owner.lock);
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
+ struct ice_ptp_tx *tx = &port->tx;
+
+ if (!tx || !tx->init)
+ continue;
+
+ ice_ptp_process_tx_tstamp(tx);
+ }
+ mutex_unlock(&pf->ptp.ports_owner.lock);
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ /* Read the Tx ready status first */
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (err || tstamp_ready)
+ return ICE_TX_TSTAMP_WORK_PENDING;
+ }
+
+ return ICE_TX_TSTAMP_WORK_DONE;
+}
+
+/**
* ice_ptp_tx_tstamp - Process Tx timestamps for this function.
* @tx: Tx tracking structure to initialize
*
@@ -1366,6 +1283,7 @@ out_unlock:
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
struct ice_ptp_port *ptp_port;
+ struct ice_hw *hw = &pf->hw;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
@@ -1380,11 +1298,16 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- /* E810 devices do not need to reconfigure the PHY */
- if (ice_is_e810(&pf->hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
+ /* Do not reconfigure E810 PHY */
return;
-
- ice_ptp_port_phy_restart(ptp_port);
+ case ICE_PHY_E822:
+ ice_ptp_port_phy_restart(ptp_port);
+ return;
+ default:
+ dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
+ }
}
/**
@@ -1441,6 +1364,24 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
}
/**
+ * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
+ * @pf: Board private structure
+ */
+static void ice_ptp_restart_all_phy(struct ice_pf *pf)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->ptp.ports_owner.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_member);
+
+ if (port->link_up)
+ ice_ptp_port_phy_restart(port);
+ }
+}
+
+/**
* ice_ptp_adjfine - Adjust clock increment rate
* @info: the driver's PTP info structure
* @scaled_ppm: Parts per million with 16-bit fractional field
@@ -1877,9 +1818,9 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
- /* Recalibrate and re-enable timestamp block */
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_restart(&pf->ptp.port);
+ /* Recalibrate and re-enable timestamp blocks for E822/E823 */
+ if (hw->phy_model == ICE_PHY_E822)
+ ice_ptp_restart_all_phy(pf);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1976,21 +1917,32 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
u32 hh_lock, hh_art_ctl;
int i;
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+#define MAX_HH_HW_LOCK_TRIES 5
+#define MAX_HH_CTL_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ usleep_range(10000, 15000);
+ continue;
+ }
+ break;
+ }
if (hh_lock & PFHH_SEM_BUSY_M) {
dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
- return -EFAULT;
+ return -EBUSY;
}
+ /* Program cmd to master timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
/* Start the ART and device clock sync sequence */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-#define MAX_HH_LOCK_TRIES 100
-
- for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
/* Wait for sync to complete */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
@@ -2014,19 +1966,23 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
break;
}
}
+
+ /* Clear the master timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
/* Release HW lock */
hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
- if (i == MAX_HH_LOCK_TRIES)
+ if (i == MAX_HH_CTL_LOCK_TRIES)
return -ETIMEDOUT;
return 0;
}
/**
- * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2034,14 +1990,14 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 devices which have support for generating the
- * cross timestamp via PCIe PTM.
+ * This is only valid for E822 and E823 devices which have support for
+ * generating the cross timestamp via PCIe PTM.
*
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
*/
static int
-ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
@@ -2246,18 +2202,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
static void
ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- info->n_per_out = N_PER_OUT_E810;
-
- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
- info->n_ext_ts = N_EXT_TS_E810;
-
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
info->n_ext_ts = N_EXT_TS_E810;
+ info->n_per_out = N_PER_OUT_E810T;
info->n_pins = NUM_PTP_PINS_E810T;
info->verify = ice_verify_pin_e810t;
/* Complete setup of the SMA pins */
ice_ptp_setup_sma_pins_e810t(pf, info);
+ } else if (ice_is_e810t(&pf->hw)) {
+ info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
+ info->n_per_out = N_PER_OUT_NO_SMA_E810T;
+ } else {
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
}
@@ -2275,22 +2233,22 @@ ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
* @pf: Board private structure
* @info: PTP info to fill
*
- * Assign functions to the PTP capabiltiies structure for E822 devices.
+ * Assign functions to the PTP capabilities structure for E82x devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
* devices.
*/
static void
-ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
if (boot_cpu_has(X86_FEATURE_ART) &&
boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+ info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
#endif /* CONFIG_ICE_HWTS */
}
@@ -2324,6 +2282,8 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
static void
ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
+ ice_ptp_set_funcs_e82x(pf, info);
+
info->enable = ice_ptp_gpio_enable_e823;
ice_ptp_setup_pins_e823(pf, info);
}
@@ -2351,7 +2311,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
else if (ice_is_e823(&pf->hw))
ice_ptp_set_funcs_e823(pf, info);
else
- ice_ptp_set_funcs_e822(pf, info);
+ ice_ptp_set_funcs_e82x(pf, info);
}
/**
@@ -2366,7 +2326,6 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
static long ice_ptp_create_clock(struct ice_pf *pf)
{
struct ptp_clock_info *info;
- struct ptp_clock *clock;
struct device *dev;
/* No need to create a clock device if we already have one */
@@ -2379,11 +2338,11 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
dev = ice_pf_to_dev(pf);
/* Attempt to register the clock before enabling the hardware. */
- clock = ptp_clock_register(info, dev);
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
- pf->ptp.clock = clock;
+ pf->ptp.clock = ptp_clock_register(info, dev);
+ if (IS_ERR(pf->ptp.clock)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device");
+ return PTR_ERR(pf->ptp.clock);
+ }
return 0;
}
@@ -2440,7 +2399,21 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
*/
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
- return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* This device has the clock owner handle timestamps for it */
+ return ICE_TX_TSTAMP_WORK_DONE;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ /* This device handles its own timestamps */
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* This device handles timestamps for all ports */
+ return ice_ptp_tx_tstamp_owner(pf);
+ default:
+ WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
+ pf->ptp.tx_interrupt_mode);
+ return ICE_TX_TSTAMP_WORK_DONE;
+ }
}
static void ice_ptp_periodic_work(struct kthread_work *work)
@@ -2474,7 +2447,7 @@ void ice_ptp_reset(struct ice_pf *pf)
if (test_bit(ICE_PFR_REQ, pf->state))
goto pfr;
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ if (!ice_pf_src_tmr_owned(pf))
goto reset_ts;
err = ice_ptp_init_phc(hw);
@@ -2550,6 +2523,209 @@ err:
}
/**
+ * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
+ * @aux_dev: auxiliary device to get the auxiliary PF for
+ */
+static struct ice_pf *
+ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
+{
+ struct ice_ptp_port *aux_port;
+ struct ice_ptp *aux_ptp;
+
+ aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
+ aux_ptp = container_of(aux_port, struct ice_ptp, port);
+
+ return container_of(aux_ptp, struct ice_pf, ptp);
+}
+
+/**
+ * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
+ * @aux_dev: auxiliary device to get the PF for
+ */
+static struct ice_pf *
+ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
+{
+ struct ice_ptp_port_owner *ports_owner;
+ struct auxiliary_driver *aux_drv;
+ struct ice_ptp *owner_ptp;
+
+ if (!aux_dev->dev.driver)
+ return NULL;
+
+ aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
+ ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
+ aux_driver);
+ owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
+ return container_of(owner_ptp, struct ice_pf, ptp);
+}
+
+/**
+ * ice_ptp_auxbus_probe - Probe auxiliary devices
+ * @aux_dev: PF's auxiliary device
+ * @id: Auxiliary device ID
+ */
+static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+
+ if (WARN_ON(!owner_pf))
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
+ mutex_lock(&owner_pf->ptp.ports_owner.lock);
+ list_add(&aux_pf->ptp.port.list_member,
+ &owner_pf->ptp.ports_owner.ports);
+ mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
+ * @aux_dev: PF's auxiliary device
+ */
+static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
+{
+ struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+
+ mutex_lock(&owner_pf->ptp.ports_owner.lock);
+ list_del(&aux_pf->ptp.port.list_member);
+ mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+}
+
+/**
+ * ice_ptp_auxbus_shutdown
+ * @aux_dev: PF's auxiliary device
+ */
+static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
+{
+ /* Doing nothing here, but handle to auxbus driver must be satisfied */
+}
+
+/**
+ * ice_ptp_auxbus_suspend
+ * @aux_dev: PF's auxiliary device
+ * @state: power management state indicator
+ */
+static int
+ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
+{
+ /* Doing nothing here, but handle to auxbus driver must be satisfied */
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_resume
+ * @aux_dev: PF's auxiliary device
+ */
+static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
+{
+ /* Doing nothing here, but handle to auxbus driver must be satisfied */
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
+ * @pf: Board private structure
+ * @name: auxiliary bus driver name
+ */
+static struct auxiliary_device_id *
+ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
+{
+ struct auxiliary_device_id *ids;
+
+ /* Second id left empty to terminate the array */
+ ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
+ sizeof(struct auxiliary_device_id), GFP_KERNEL);
+ if (!ids)
+ return NULL;
+
+ snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
+
+ return ids;
+}
+
+/**
+ * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
+ * @pf: Board private structure
+ */
+static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
+{
+ struct auxiliary_driver *aux_driver;
+ struct ice_ptp *ptp;
+ struct device *dev;
+ char *name;
+ int err;
+
+ ptp = &pf->ptp;
+ dev = ice_pf_to_dev(pf);
+ aux_driver = &ptp->ports_owner.aux_driver;
+ INIT_LIST_HEAD(&ptp->ports_owner.ports);
+ mutex_init(&ptp->ports_owner.lock);
+ name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
+ pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
+ ice_get_ptp_src_clock_index(&pf->hw));
+
+ aux_driver->name = name;
+ aux_driver->shutdown = ice_ptp_auxbus_shutdown;
+ aux_driver->suspend = ice_ptp_auxbus_suspend;
+ aux_driver->remove = ice_ptp_auxbus_remove;
+ aux_driver->resume = ice_ptp_auxbus_resume;
+ aux_driver->probe = ice_ptp_auxbus_probe;
+ aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
+ if (!aux_driver->id_table)
+ return -ENOMEM;
+
+ err = auxiliary_driver_register(aux_driver);
+ if (err) {
+ devm_kfree(dev, aux_driver->id_table);
+ dev_err(dev, "Failed registering aux_driver, name <%s>\n",
+ name);
+ }
+
+ return err;
+}
+
+/**
+ * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
+ * @pf: Board private structure
+ */
+static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
+{
+ struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
+
+ auxiliary_driver_unregister(aux_driver);
+ devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
+
+ mutex_destroy(&pf->ptp.ports_owner.lock);
+}
+
+/**
+ * ice_ptp_clock_index - Get the PTP clock index for this device
+ * @pf: Board private structure
+ *
+ * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
+ * is associated.
+ */
+int ice_ptp_clock_index(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev;
+ struct ice_pf *owner_pf;
+ struct ptp_clock *clock;
+
+ aux_dev = &pf->ptp.port.aux_dev;
+ owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ if (!owner_pf)
+ return -1;
+ clock = owner_pf->ptp.clock;
+
+ return clock ? ptp_clock_index(clock) : -1;
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
*/
@@ -2627,7 +2803,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (!ice_is_e810(hw)) {
+ if (pf->ptp.tx_interrupt_mode == ICE_PTP_TX_INTERRUPT_ALL) {
+ /* The clock owner for this device type handles the timestamp
+ * interrupt for all ports.
+ */
+ ice_ptp_configure_tx_tstamp(pf, true);
+
+ /* React to interrupts from all quads for E82x */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+
/* Enable quad interrupts */
err = ice_ptp_tx_ena_intr(pf, true, itr);
if (err)
@@ -2639,11 +2823,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (err)
goto err_clk;
- /* Store the PTP clock index for other PFs */
- ice_set_ptp_clock_index(pf);
+ err = ice_ptp_register_auxbus_driver(pf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver");
+ goto err_aux;
+ }
return 0;
-
+err_aux:
+ ptp_clock_unregister(pf->ptp.clock);
err_clk:
pf->ptp.clock = NULL;
err_exit:
@@ -2685,14 +2873,124 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
+ struct ice_hw *hw = &pf->hw;
+
mutex_init(&ptp_port->ps_lock);
- if (ice_is_e810(&pf->hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+ case ICE_PHY_E822:
+ /* Non-owner PFs don't react to any interrupts on E82x,
+ * neither on their own quad nor on others
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf)) {
+ ice_ptp_configure_tx_tstamp(pf, false);
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ }
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offsets);
+
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ ptp_port->port_num);
+ default:
+ return -ENODEV;
+ }
+}
+
+/**
+ * ice_ptp_release_auxbus_device
+ * @dev: device that utilizes the auxbus
+ */
+static void ice_ptp_release_auxbus_device(struct device *dev)
+{
+ /* Doing nothing here, but handle to auxbus device must be satisfied */
+}
+
+/**
+ * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
+ * @pf: Board private structure
+ */
+static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev;
+ struct ice_ptp *ptp;
+ struct device *dev;
+ char *name;
+ int err;
+ u32 id;
- kthread_init_delayed_work(&ptp_port->ov_work,
- ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+ ptp = &pf->ptp;
+ id = ptp->port.port_num;
+ dev = ice_pf_to_dev(pf);
+
+ aux_dev = &ptp->port.aux_dev;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
+ pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
+ ice_get_ptp_src_clock_index(&pf->hw));
+
+ aux_dev->name = name;
+ aux_dev->id = id;
+ aux_dev->dev.release = ice_ptp_release_auxbus_device;
+ aux_dev->dev.parent = dev;
+
+ err = auxiliary_device_init(aux_dev);
+ if (err)
+ goto aux_err;
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ auxiliary_device_uninit(aux_dev);
+ goto aux_err;
+ }
+
+ return 0;
+aux_err:
+ dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
+ devm_kfree(dev, name);
+ return err;
+}
+
+/**
+ * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
+ * @pf: Board private structure
+ */
+static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
+
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+
+ memset(aux_dev, 0, sizeof(*aux_dev));
+}
+
+/**
+ * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
+ * @pf: Board private structure
+ *
+ * Initialize the Tx timestamp interrupt mode for this device. For most device
+ * types, each PF processes the interrupt and manages its own timestamps. For
+ * E822-based devices, only the clock owner processes the timestamps. Other
+ * PFs disable the interrupt and do not process their own timestamps.
+ */
+static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
+{
+ switch (pf->hw.phy_model) {
+ case ICE_PHY_E822:
+ /* E822 based PHY has the clock owner process the interrupt
+ * for all ports.
+ */
+ if (ice_pf_src_tmr_owned(pf))
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
+ else
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
+ break;
+ default:
+ /* other PHY types handle their own Tx interrupt */
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
+ }
}
/**
@@ -2713,10 +3011,14 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ice_ptp_init_phy_model(hw);
+
+ ice_ptp_init_tx_interrupt_mode(pf);
+
/* If this function owns the clock hardware, it must allocate and
* configure the PTP clock device to represent it.
*/
- if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ if (ice_pf_src_tmr_owned(pf)) {
err = ice_ptp_init_owner(pf);
if (err)
goto err;
@@ -2735,6 +3037,10 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err;
+ err = ice_ptp_create_auxbus_device(pf);
+ if (err)
+ goto err;
+
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
@@ -2763,6 +3069,8 @@ void ice_ptp_release(struct ice_pf *pf)
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_remove_auxbus_device(pf);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
clear_bit(ICE_FLAG_PTP, pf->flags);
@@ -2782,9 +3090,10 @@ void ice_ptp_release(struct ice_pf *pf)
/* Disable periodic outputs */
ice_ptp_disable_all_clkout(pf);
- ice_clear_ptp_clock_index(pf);
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
+ ice_ptp_unregister_auxbus_driver(pf);
+
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 995a57019ba7..8f6f94392756 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -157,7 +157,9 @@ struct ice_ptp_tx {
* ready for PTP functionality. It is used to track the port initialization
* and determine when the port's PHY offset is valid.
*
+ * @list_member: list member structure of auxiliary device
* @tx: Tx timestamp tracking for this port
+ * @aux_dev: auxiliary device associated with this port
* @ov_work: delayed work task for tracking when PHY offset is valid
* @ps_lock: mutex used to protect the overall PTP PHY start procedure
* @link_up: indicates whether the link is up
@@ -165,7 +167,9 @@ struct ice_ptp_tx {
* @port_num: the port number this structure represents
*/
struct ice_ptp_port {
+ struct list_head list_member;
struct ice_ptp_tx tx;
+ struct auxiliary_device aux_dev;
struct kthread_delayed_work ov_work;
struct mutex ps_lock; /* protects overall PTP PHY start procedure */
bool link_up;
@@ -173,11 +177,35 @@ struct ice_ptp_port {
u8 port_num;
};
+enum ice_ptp_tx_interrupt {
+ ICE_PTP_TX_INTERRUPT_NONE = 0,
+ ICE_PTP_TX_INTERRUPT_SELF,
+ ICE_PTP_TX_INTERRUPT_ALL,
+};
+
+/**
+ * struct ice_ptp_port_owner - data used to handle the PTP clock owner info
+ *
+ * This structure contains data necessary for the PTP clock owner to correctly
+ * handle the timestamping feature for all attached ports.
+ *
+ * @aux_driver: the structure carrying the auxiliary driver information
+ * @ports: list of ports handled by this port owner
+ * @lock: protect access to ports list
+ */
+struct ice_ptp_port_owner {
+ struct auxiliary_driver aux_driver;
+ struct list_head ports;
+ struct mutex lock;
+};
+
#define GLTSYN_TGT_H_IDX_MAX 4
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
+ * @ports_owner: data for the auxiliary driver owner
* @work: delayed work function for periodic tasks
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @cached_phc_jiffies: jiffies when cached_phc_time was last updated
@@ -197,7 +225,9 @@ struct ice_ptp_port {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
+ struct ice_ptp_port_owner ports_owner;
struct kthread_delayed_work work;
u64 cached_phc_time;
unsigned long cached_phc_jiffies;
@@ -258,11 +288,11 @@ struct ice_ptp {
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
-int ice_get_ptp_clock_index(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -288,10 +318,6 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
}
static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
-{
- return -1;
-}
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
@@ -314,5 +340,10 @@ static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
}
+
+static inline int ice_ptp_clock_index(struct ice_pf *pf)
+{
+ return -1;
+}
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index f818dd215c05..6d573908de7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -7,6 +7,132 @@
#include "ice_ptp_consts.h"
#include "ice_cgu_regs.h"
+static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+ DPLL_PIN_FREQUENCY_10MHZ,
+};
+
+static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+};
+
+static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
+ DPLL_PIN_FREQUENCY_10MHZ,
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
+ { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
+ { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
+ { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
+ { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
+ { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
+ { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
+ { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
+ { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
+ { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
+ { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
+ { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
+ { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
+ { "NONE", SI_REF0P, 0, 0 },
+ { "NONE", SI_REF0N, 0, 0 },
+ { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
+ { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
+ { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", SI_REF2N, 0, 0 },
+ { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
+ { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
+ { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
+ { "NONE", ZL_REF0P, 0, 0 },
+ { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
+ { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
+ { "NONE", ZL_REF2P, 0, 0 },
+ { "NONE", ZL_REF2N, 0, 0 },
+ { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", ZL_REF3N, 0, 0 },
+ { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
+ { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", ZL_OUT5, 0, 0 },
+};
+
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
*
@@ -107,7 +233,7 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
*
* Prepare the source timer for an upcoming timer sync command.
*/
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val;
u8 tmr_idx;
@@ -116,19 +242,19 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
cmd_val = tmr_idx << SEL_CPK_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= GLTSYN_CMD_ADJ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= GLTSYN_CMD_READ_TIME;
break;
case ICE_PTP_NOP:
@@ -168,9 +294,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY;
- phy = port / ICE_PORTS_PER_PHY;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+ phy_port = port % ICE_PORTS_PER_PHY_E822;
+ phy = port / ICE_PORTS_PER_PHY_E822;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -495,20 +621,25 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
*/
-static void
+static int
ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
msg->dest_dev = rmn_0;
- if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
msg->msg_addr_low = lower_16_bits(addr);
msg->msg_addr_high = upper_16_bits(addr);
+
+ return 0;
}
/**
@@ -527,10 +658,10 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- if (quad >= ICE_MAX_QUAD)
- return -EINVAL;
+ err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (err)
+ return err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -561,10 +692,10 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- if (quad >= ICE_MAX_QUAD)
- return -EINVAL;
+ err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (err)
+ return err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -628,29 +759,32 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* @quad: the quad to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
- * shared between the internal PHYs on the E822 devices.
+ * Read the timestamp out of the quad to clear its timestamp status bit from
+ * the PHY quad block that is shared between the internal PHYs of the E822
+ * devices.
+ *
+ * Note that unlike E810, software cannot directly write to the quad memory
+ * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
+ * to determine which timestamps are valid. Reading a timestamp auto-clears
+ * the valid bit.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_quad_e822().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
*/
static int
ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
{
- u16 lo_addr, hi_addr;
+ u64 unused_tstamp;
int err;
- lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
- hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
-
- err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
- err);
- return err;
- }
-
- err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
+ quad, idx, err);
return err;
}
@@ -1025,7 +1159,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* @time: Time to initialize the PHY port clocks to
*
* Program the PHY port registers with a new initial time value. The port
- * clock will be initialized once the driver issues an INIT_TIME sync
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
* command. The time value is the upper 32 bits of the PHY timer, usually in
* units of nominal nanoseconds.
*/
@@ -1074,7 +1208,7 @@ exit_err:
*
* Program the port for an atomic adjustment by writing the Tx and Rx timer
* registers. The atomic adjustment won't be completed until the driver issues
- * an ADJ_TIME command.
+ * an ICE_PTP_ADJ_TIME command.
*
* Note that time is not in units of nanoseconds. It is in clock time
* including the lower sub-nanosecond portion of the port timer.
@@ -1127,7 +1261,7 @@ exit_err:
*
* Prepare the PHY ports for an atomic time adjustment by programming the PHY
* Tx and Rx port registers. The actual adjustment is completed by issuing an
- * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
@@ -1162,7 +1296,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
*
* Prepare each of the PHY ports for a new increment value by programming the
* port's TIMETUS registers. The new increment value will be updated after
- * issuing an INIT_INCVAL command.
+ * issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
@@ -1248,19 +1382,19 @@ ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd
tmr_idx = ice_get_ptp_src_clock_index(hw);
cmd_val = tmr_idx << SEL_PHY_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= PHY_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= PHY_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= PHY_CMD_ADJ_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= PHY_CMD_READ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
break;
case ICE_PTP_NOP:
@@ -2196,8 +2330,8 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* @phy_time: on return, the 64bit PHY timer value
* @phc_time: on return, the lower 64bits of PHC time
*
- * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
- * timer values.
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
*/
static int
ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
@@ -2210,15 +2344,15 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
tmr_idx = ice_get_ptp_src_clock_index(hw);
- /* Prepare the PHC timer for a READ_TIME capture command */
- ice_ptp_src_cmd(hw, READ_TIME);
+ /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
- /* Prepare the PHY timer for a READ_TIME capture command */
- err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
if (err)
return err;
- /* Issue the sync to start the READ_TIME capture */
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
@@ -2252,10 +2386,11 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* @port: the PHY port to synchronize
*
* Perform an adjustment to ensure that the PHY and PHC timers are in sync.
- * This is done by issuing a READ_TIME command which triggers a simultaneous
- * read of the PHY timer and PHC timer. Then we use the difference to
- * calculate an appropriate 2s complement addition to add to the PHY timer in
- * order to ensure it reads the same value as the primary PHC timer.
+ * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
*/
static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
{
@@ -2285,7 +2420,7 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
goto err_unlock;
- err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
if (err)
goto err_unlock;
@@ -2408,7 +2543,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
- err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
if (err)
return err;
@@ -2436,7 +2571,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
- err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
if (err)
return err;
@@ -2685,28 +2820,39 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
* @lport: the lport to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block of the
- * external PHY on the E810 device.
+ * Read the timestamp and then forcibly overwrite its value to clear the valid
+ * bit from the timestamp block of the external PHY on the E810 device.
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
*/
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
+ u64 unused_tstamp;
int err;
+ err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
+ return err;
+ }
+
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
err = ice_write_phy_reg_e810(hw, lo_addr, 0);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
return err;
}
err = ice_write_phy_reg_e810(hw, hi_addr, 0);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
return err;
}
@@ -2757,7 +2903,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
*
* Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the
* initial clock time. The time will not actually be programmed until the
- * driver issues an INIT_TIME command.
+ * driver issues an ICE_PTP_INIT_TIME command.
*
* The time value is the upper 32 bits of the PHY timer, usually in units of
* nominal nanoseconds.
@@ -2792,7 +2938,7 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*
* Prepare the PHY port for an atomic adjustment by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
- * is completed by issuing an ADJ_TIME sync command.
+ * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
*
* The adjustment value only contains the portion used for the upper 32bits of
* the PHY timer, usually in units of nominal nanoseconds. Negative
@@ -2832,7 +2978,7 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
*
* Prepare the PHY port for a new increment value by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
- * completed by issuing an INIT_INCVAL command.
+ * completed by issuing an ICE_PTP_INIT_INCVAL command.
*/
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
@@ -2875,19 +3021,19 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
int err;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val = GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val = GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val = GLTSYN_CMD_ADJ_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val = GLTSYN_CMD_READ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
break;
case ICE_PTP_NOP:
@@ -3150,6 +3296,21 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
+ * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * @hw: pointer to the HW structure
+ *
+ * Determine the PHY model for the device, and initialize hw->phy_model
+ * for use by other functions.
+ */
+void ice_ptp_init_phy_model(struct ice_hw *hw)
+{
+ if (ice_is_e810(hw))
+ hw->phy_model = ICE_PHY_E810;
+ else
+ hw->phy_model = ICE_PHY_E822;
+}
+
+/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -3167,10 +3328,17 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_port_cmd_e822(hw, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -3212,14 +3380,21 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, INIT_TIME);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
}
/**
@@ -3232,8 +3407,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*
* 1) Write the increment value to the source timer shadow registers
* 2) Write the increment value to the PHY timer shadow registers
- * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
- * source and port timers to the new increment value at the next clock
+ * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
+ * the source and port timers to the new increment value at the next clock
* cycle.
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
@@ -3247,14 +3422,21 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
}
/**
@@ -3288,8 +3470,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*
* 1) Write the adjustment to the source timer shadow registers
* 2) Write the adjustment to the PHY timer shadow registers
- * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
- * both the source and port timers at the next clock cycle.
+ * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
+ * adjustment to both the source and port timers at the next clock cycle.
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
@@ -3299,21 +3481,28 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
- * For an ADJ_TIME command, this set of registers represents the value
- * to add to the clock time. It supports subtraction by interpreting
- * the value as a 2's complement integer.
+ * For an ICE_PTP_ADJ_TIME command, this set of registers represents
+ * the value to add to the clock time. It supports subtraction by
+ * interpreting the value as a 2's complement integer.
*/
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, ADJ_TIME);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
}
/**
@@ -3329,10 +3518,14 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- else
+ case ICE_PHY_E822:
return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -3341,16 +3534,71 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block. For
- * E822 devices, the block is the quad to clear from. For E810 devices, the
- * block is the logical port to clear from.
+ * Clear a timestamp from the timestamp block, discarding its value without
+ * returning it. This resets the memory status bit for the timestamp index
+ * allowing it to be reused for another timestamp in the future.
+ *
+ * For E822 devices, the block number is the PHY quad to clear from. For E810
+ * devices, the block number is the logical port to clear from.
+ *
+ * This function must only be called on a timestamp index whose valid bit is
+ * set according to ice_get_phy_tx_tstamp_ready().
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- else
+ case ICE_PHY_E822:
return ice_clear_phy_tstamp_e822(hw, block, idx);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_get_pf_c827_idx - find and return the C827 index for the current pf
+ * @hw: pointer to the hw struct
+ * @idx: index of the found C827 PHY
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
+{
+ struct ice_aqc_get_link_topo cmd;
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+ u8 ctx;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return -ENODEV;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
+ *idx = C827_0;
+ return 0;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return -ENOENT;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE)
+ *idx = C827_0;
+ else if (node_handle == E810C_QSFP_C827_1_HANDLE)
+ *idx = C827_1;
+ else
+ return -EIO;
+
+ return 0;
}
/**
@@ -3359,10 +3607,14 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E822:
+ ice_ptp_reset_ts_memory_e822(hw);
+ break;
+ case ICE_PHY_E810:
+ default:
return;
-
- ice_ptp_reset_ts_memory_e822(hw);
+ }
}
/**
@@ -3381,10 +3633,14 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
- else
+ case ICE_PHY_E822:
return ice_ptp_init_phc_e822(hw);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -3400,10 +3656,308 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- else
+ case ICE_PHY_E822:
return ice_get_phy_tx_tstamp_ready_e822(hw, block,
tstamp_ready);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_cgu_get_pin_desc_e823 - get pin description array
+ * @hw: pointer to the hw struct
+ * @input: if request is done against input or output pin
+ * @size: number of inputs/outputs
+ *
+ * Return: pointer to pin description array associated to given hw.
+ */
+static const struct ice_cgu_pin_desc *
+ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
+{
+ static const struct ice_cgu_pin_desc *t;
+
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
+ if (input) {
+ t = ice_e823_zl_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
+ } else {
+ t = ice_e823_zl_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
+ }
+ } else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
+ if (input) {
+ t = ice_e823_si_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
+ } else {
+ t = ice_e823_si_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
+ }
+ } else {
+ t = NULL;
+ *size = 0;
+ }
+
+ return t;
+}
+
+/**
+ * ice_cgu_get_pin_desc - get pin description array
+ * @hw: pointer to the hw struct
+ * @input: if request is done against input or output pins
+ * @size: size of array returned by function
+ *
+ * Return: pointer to pin description array associated to given hw.
+ */
+static const struct ice_cgu_pin_desc *
+ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
+{
+ const struct ice_cgu_pin_desc *t = NULL;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (input) {
+ t = ice_e810t_sfp_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
+ } else {
+ t = ice_e810t_sfp_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ if (input) {
+ t = ice_e810t_qsfp_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
+ } else {
+ t = ice_e810t_qsfp_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
+ }
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ t = ice_cgu_get_pin_desc_e823(hw, input, size);
+ break;
+ default:
+ break;
+ }
+
+ return t;
+}
+
+/**
+ * ice_cgu_get_pin_type - get pin's type
+ * @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: if request is done against input or output pin
+ *
+ * Return: type of a pin.
+ */
+enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
+{
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
+
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+
+ if (!t)
+ return 0;
+
+ if (pin >= t_size)
+ return 0;
+
+ return t[pin].type;
+}
+
+/**
+ * ice_cgu_get_pin_freq_supp - get pin's supported frequency
+ * @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: if request is done against input or output pin
+ * @num: output number of supported frequencies
+ *
+ * Get the number of supported frequencies and the array holding them.
+ *
+ * Return: array of supported frequencies for given pin.
+ */
+struct dpll_pin_frequency *
+ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
+{
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
+
+ *num = 0;
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+ if (!t)
+ return NULL;
+ if (pin >= t_size)
+ return NULL;
+ *num = t[pin].freq_supp_num;
+
+ return t[pin].freq_supp;
+}
+
+/**
+ * ice_cgu_get_pin_name - get pin's name
+ * @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: if request is done against input or output pin
+ *
+ * Return:
+ * * null terminated char array with name
+ * * NULL in case of failure
+ */
+const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
+{
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
+
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+
+ if (!t)
+ return NULL;
+
+ if (pin >= t_size)
+ return NULL;
+
+ return t[pin].name;
+}
+
+/**
+ * ice_get_cgu_state - get the state of the DPLL
+ * @hw: pointer to the hw struct
+ * @dpll_idx: Index of internal DPLL unit
+ * @last_dpll_state: last known state of DPLL
+ * @pin: pointer to a buffer for returning currently active pin
+ * @ref_state: reference clock state
+ * @eec_mode: eec mode of the DPLL
+ * @phase_offset: pointer to a buffer for returning phase offset
+ * @dpll_state: state of the DPLL (output)
+ *
+ * This function will read the state of the DPLL (dpll_idx). Non-NULL
+ * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to
+ * retrieve the currently active pin, reference state, EEC mode and phase
+ * offset, respectively.
+ *
+ * Return: state of the DPLL
+ */
+int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
+ enum dpll_lock_status last_dpll_state, u8 *pin,
+ u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
+ enum dpll_lock_status *dpll_state)
+{
+ u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
+ s64 hw_phase_offset;
+ int status;
+
+ status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
+ &hw_dpll_state, &hw_config,
+ &hw_phase_offset, &hw_eec_mode);
+ if (status)
+ return status;
+
+ if (pin)
+ /* current ref pin in dpll_state_refsel_status_X register */
+ *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
+ if (phase_offset)
+ *phase_offset = hw_phase_offset;
+ if (ref_state)
+ *ref_state = hw_ref_state;
+ if (eec_mode)
+ *eec_mode = hw_eec_mode;
+ if (!dpll_state)
+ return 0;
+
+ /* According to the ZL DPLL documentation, once the state reaches
+ * LOCKED_HO_ACQ it never returns to FREERUN. This aligns with the
+ * ITU-T G.781 Recommendation. We cannot report HOLDOVER, as the HO
+ * memory is cleared while switching to another reference.
+ * Only when the previous state was either "LOCKED without HO_ACQ" or
+ * "HOLDOVER" do we actually go back to FREERUN.
+ */
+ if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
+ if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
+ *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
+ else
+ *dpll_state = DPLL_LOCK_STATUS_LOCKED;
+ } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
+ last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
+ *dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
+ } else {
+ *dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
+ * @hw: pointer to the hw struct
+ * @base_idx: returns index of first recovered clock pin on device
+ * @pin_num: returns number of recovered clock pins available on device
+ *
+ * Based on the hw, provide the caller with info about the recovered clock
+ * pins available on the board.
+ *
+ * Return:
+ * * 0 - success, information is valid
+ * * negative - failure, information is not valid
+ */
+int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
+{
+ u8 phy_idx;
+ int ret;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810C_QSFP:
+
+ ret = ice_get_pf_c827_idx(hw, &phy_idx);
+ if (ret)
+ return ret;
+ *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
+ *pin_num = ICE_E810_RCLK_PINS_NUM;
+ ret = 0;
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ *pin_num = ICE_E822_RCLK_PINS_NUM;
+ ret = 0;
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
+ *base_idx = ZL_REF1P;
+ else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
+ *base_idx = SI_REF1P;
+ else
+ ret = -ENODEV;
+
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+
+ return ret;
}
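
The ice_cgu_get_pin_*() helpers added above only index into the static pin descriptor tables. Here is a minimal sketch of a hypothetical debug dump walking the input pins of a ZL-based board; NUM_ZL_CGU_INPUT_PINS is just a convenient upper bound, since the helpers return NULL/0 past the end of the table for other CGUs. Only the helper calls and the enum come from the patch, the dump itself is a made-up example.

/* Illustration only: list the CGU input pins known for this hw. */
static void example_dump_cgu_inputs(struct ice_hw *hw)
{
	u8 pin;

	for (pin = 0; pin < NUM_ZL_CGU_INPUT_PINS; pin++) {
		const char *name = ice_cgu_get_pin_name(hw, pin, true);
		u8 freq_num;

		if (!name)
			continue;	/* index beyond this board's table */

		ice_cgu_get_pin_freq_supp(hw, pin, true, &freq_num);
		pr_info("CGU input %u: %s, type %d, %u supported frequencies\n",
			pin, name, ice_cgu_get_pin_type(hw, pin, true),
			freq_num);
	}
}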
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 9aa10b0426fd..36aeeef99ec0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -3,13 +3,14 @@
#ifndef _ICE_PTP_HW_H_
#define _ICE_PTP_HW_H_
+#include <linux/dpll.h>
enum ice_ptp_tmr_cmd {
- INIT_TIME,
- INIT_INCVAL,
- ADJ_TIME,
- ADJ_TIME_AT_TIME,
- READ_TIME,
+ ICE_PTP_INIT_TIME,
+ ICE_PTP_INIT_INCVAL,
+ ICE_PTP_ADJ_TIME,
+ ICE_PTP_ADJ_TIME_AT_TIME,
+ ICE_PTP_READ_TIME,
ICE_PTP_NOP,
};
@@ -110,6 +111,77 @@ struct ice_cgu_pll_params_e822 {
u32 post_pll_div;
};
+#define E810C_QSFP_C827_0_HANDLE 2
+#define E810C_QSFP_C827_1_HANDLE 3
+enum ice_e810_c827_idx {
+ C827_0,
+ C827_1
+};
+
+enum ice_phy_rclk_pins {
+ ICE_RCLKA_PIN = 0, /* SCL pin */
+ ICE_RCLKB_PIN, /* SDA pin */
+};
+
+#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
+#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
+ (_pin) + ZL_REF1P)
+
+enum ice_zl_cgu_in_pins {
+ ZL_REF0P = 0,
+ ZL_REF0N,
+ ZL_REF1P,
+ ZL_REF1N,
+ ZL_REF2P,
+ ZL_REF2N,
+ ZL_REF3P,
+ ZL_REF3N,
+ ZL_REF4P,
+ ZL_REF4N,
+ NUM_ZL_CGU_INPUT_PINS
+};
+
+enum ice_zl_cgu_out_pins {
+ ZL_OUT0 = 0,
+ ZL_OUT1,
+ ZL_OUT2,
+ ZL_OUT3,
+ ZL_OUT4,
+ ZL_OUT5,
+ ZL_OUT6,
+ NUM_ZL_CGU_OUTPUT_PINS
+};
+
+enum ice_si_cgu_in_pins {
+ SI_REF0P = 0,
+ SI_REF0N,
+ SI_REF1P,
+ SI_REF1N,
+ SI_REF2P,
+ SI_REF2N,
+ SI_REF3,
+ SI_REF4,
+ NUM_SI_CGU_INPUT_PINS
+};
+
+enum ice_si_cgu_out_pins {
+ SI_OUT0 = 0,
+ SI_OUT1,
+ SI_OUT2,
+ SI_OUT3,
+ SI_OUT4,
+ NUM_SI_CGU_OUTPUT_PINS
+};
+
+struct ice_cgu_pin_desc {
+ char *name;
+ u8 index;
+ enum dpll_pin_type type;
+ u32 freq_supp_num;
+ struct dpll_pin_frequency *freq_supp;
+};
+
extern const struct
ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
@@ -131,6 +203,7 @@ extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
@@ -197,6 +270,18 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
+bool ice_is_pca9575_present(struct ice_hw *hw);
+enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
+struct dpll_pin_frequency *
+ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num);
+const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input);
+int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
+ enum dpll_lock_status last_dpll_state, u8 *pin,
+ u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
+ enum dpll_lock_status *dpll_state);
+int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
+
+void ice_ptp_init_phy_model(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
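
The E810T_CGU_INPUT_C827() macro defined above packs the recovered-clock pins of the two C827 PHYs onto consecutive ZL DPLL reference inputs starting at ZL_REF1P. The compile-time checks below spell out the arithmetic; they are illustrative assertions, not part of the patch.

/* (_phy) * ICE_E810_RCLK_PINS_NUM + (_pin) + ZL_REF1P, with two RCLK pins
 * per PHY, maps the four recovered clocks as follows:
 */
static_assert(E810T_CGU_INPUT_C827(C827_0, ICE_RCLKA_PIN) == ZL_REF1P);
static_assert(E810T_CGU_INPUT_C827(C827_0, ICE_RCLKB_PIN) == ZL_REF1N);
static_assert(E810T_CGU_INPUT_C827(C827_1, ICE_RCLKA_PIN) == ZL_REF2P);
static_assert(E810T_CGU_INPUT_C827(C827_1, ICE_RCLKB_PIN) == ZL_REF2N);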
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index c0533d7b66b9..2f4a621254e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -229,29 +229,22 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
* ice_sched_remove_elems - remove nodes from HW
* @hw: pointer to the HW struct
* @parent: pointer to the parent node
- * @num_nodes: number of nodes
- * @node_teids: array of node teids to be deleted
+ * @node_teid: node teid to be deleted
*
* This function remove nodes from HW
*/
static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
- u16 num_nodes, u32 *node_teids)
+ u32 node_teid)
{
- struct ice_aqc_delete_elem *buf;
- u16 i, num_groups_removed = 0;
- u16 buf_size;
+ DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ u16 buf_size = __struct_size(buf);
+ u16 num_groups_removed = 0;
int status;
- buf_size = struct_size(buf, teid, num_nodes);
- buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->hdr.parent_teid = parent->info.node_teid;
- buf->hdr.num_elems = cpu_to_le16(num_nodes);
- for (i = 0; i < num_nodes; i++)
- buf->teid[i] = cpu_to_le32(node_teids[i]);
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->teid[0] = cpu_to_le32(node_teid);
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
@@ -259,7 +252,6 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
- devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
@@ -326,7 +318,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- ice_sched_remove_elems(hw, node->parent, 1, &teid);
+ ice_sched_remove_elems(hw, node->parent, teid);
}
parent = node->parent;
/* root has no parent */
@@ -437,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
- * ice_aq_move_sched_elems - move scheduler elements
+ * ice_aq_move_sched_elems - move scheduler element (just 1 group)
* @hw: pointer to the HW struct
- * @grps_req: number of groups to move
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @grps_movd: returns total number of groups moved
- * @cd: pointer to command details structure or NULL
*
* Move scheduling elements (0x0408)
*/
int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
- struct ice_aqc_move_elem *buf, u16 buf_size,
- u16 *grps_movd, struct ice_sq_cd *cd)
+ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
- grps_req, (void *)buf, buf_size,
- grps_movd, cd);
+ 1, buf, buf_size, grps_movd, NULL);
}
/**
@@ -1193,7 +1181,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
int status;
/* remove the default leaf node */
- status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
+ status = ice_sched_remove_elems(pi->hw, node->parent, teid);
if (!status)
ice_free_sched_node(pi, node);
}
@@ -2232,12 +2220,12 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- struct ice_aqc_move_elem *buf;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
int status = 0;
- u16 buf_len;
hw = pi->hw;
@@ -2249,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
hw->max_children[parent->tx_sched_layer])
return -ENOSPC;
- buf_len = struct_size(buf, teid, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
status = -EINVAL;
- goto move_err_exit;
+ break;
}
buf->hdr.src_parent_teid = node->info.parent_teid;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
- &grps_movd, NULL);
+ status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
if (status && grps_movd != 1) {
status = -EIO;
- goto move_err_exit;
+ break;
}
/* update the SW DB */
ice_sched_update_parent(parent, node);
}
-move_err_exit:
- kfree(buf);
return status;
}
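
The allocations removed above all followed the same shape: a single-element flexible-array buffer obtained with struct_size() + kzalloc and freed on every exit path. DEFINE_FLEX() replaces that with an on-stack, zero-initialized buffer whose total size __struct_size() reports. Below is a minimal sketch of the pattern; the struct and the send helper are made up for illustration, only the DEFINE_FLEX/__struct_size usage mirrors the patch.

/* Hypothetical command buffer with a flexible array member, mirroring the
 * ice_aqc_* buffers converted above.
 */
struct example_delete_elem {
	__le16 num_elems;
	__le32 teid[];
};

/* Assumed AQ send helper, illustration only */
static int example_send_cmd(struct ice_hw *hw, void *buf, u16 buf_size);

static int example_delete_one(struct ice_hw *hw, u32 node_teid)
{
	/* One trailing element, sized at compile time and zeroed on the
	 * stack - no kzalloc/kfree pair and no error-path cleanup needed.
	 */
	DEFINE_FLEX(struct example_delete_elem, buf, teid, 1);
	u16 buf_size = __struct_size(buf);

	buf->num_elems = cpu_to_le16(1);
	buf->teid[0] = cpu_to_le32(node_teid);

	return example_send_cmd(hw, buf, buf_size);
}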
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 0055d9330c07..1aef05ea5a57 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -161,10 +161,8 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
u16 *num_nodes_added);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
- struct ice_aqc_move_elem *buf, u16 buf_size,
- u16 *grps_movd, struct ice_sq_cd *cd);
+int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd);
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 31314e7540f8..2a5e6616cc0a 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -64,7 +64,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+ last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
/* clear VF MDD event information */
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
@@ -102,7 +102,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
- last = first + pf->vfs.num_msix_per - 1;
+ last = first + vf->num_msix - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -138,6 +138,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
if (!pf)
return -EINVAL;
+ bitmap_free(pf->sriov_irq_bm);
+ pf->sriov_irq_size = 0;
pf->sriov_base_vector = 0;
return 0;
@@ -244,22 +246,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
return vsi;
}
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
- return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
-}
/**
* ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
@@ -280,12 +266,12 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
hw = &pf->hw;
pf_based_first_msix = vf->first_vector_idx;
- pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+ pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;
device_based_first_msix = pf_based_first_msix +
pf->hw.func_caps.common_cap.msix_vector_first_id;
device_based_last_msix =
- (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+ (device_based_first_msix + vf->num_msix) - 1;
device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
@@ -527,6 +513,52 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
+ * ice_sriov_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
+ *
+ * This returns the first MSI-X vector index in PF space that is used by this
+ * VF. This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL.
+ * This will always be the OICR index in the AVF driver so any functionality
+ * using vf->first_vector_idx for queue configuration will have to increment
+ * by 1 to avoid meddling with the OICR index.
+ *
+ * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
+ * allocated from the end of global irq index. First bit in sriov_irq_bm means
+ * last irq index etc. It simplifies extension of SRIOV vectors.
+ * They will be always located from sriov_base_vector to the last irq
+ * index. While increasing/decreasing sriov_base_vector can be moved.
+ */
+static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
+{
+ int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
+ pf->sriov_irq_size, 0, needed, 0);
+ /* conversion from number in bitmap to global irq index */
+ int index = pf->sriov_irq_size - res - needed;
+
+ if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
+ return -ENOENT;
+
+ bitmap_set(pf->sriov_irq_bm, res, needed);
+ return index;
+}
+
+/**
+ * ice_sriov_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure
+ */
+static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
+{
+ /* Move back from first vector index to first index in bitmap */
+ int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
+
+ bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
+ vf->first_vector_idx = 0;
+}
+
+/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -539,7 +571,9 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0)
+ return -ENOMEM;
vsi = ice_vf_vsi_setup(vf);
if (!vsi)
@@ -789,14 +823,19 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
*/
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
+ struct pci_dev *pdev = pf->pdev;
struct ice_vfs *vfs = &pf->vfs;
+ struct pci_dev *vfdev = NULL;
struct ice_vf *vf;
- u16 vf_id;
- int err;
+ u16 vf_pdev_id;
+ int err, pos;
lockdep_assert_held(&vfs->table_lock);
- for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);
+
+ for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf) {
err = -ENOMEM;
@@ -812,11 +851,28 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
ice_initialize_vf_entry(vf);
+ do {
+ vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
+ } while (vfdev && vfdev->physfn != pdev);
+ vf->vfdev = vfdev;
vf->vf_sw_id = pf->first_sw;
+ pci_dev_get(vfdev);
+
+ /* set default number of MSI-X */
+ vf->num_msix = pf->vfs.num_msix_per;
+ vf->num_vf_qs = pf->vfs.num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
+ /* Decrement of refcount done by pci_get_device() inside the loop does
+ * not touch the last iteration's vfdev, so it has to be done manually
+ * to balance pci_dev_get() added within the loop.
+ */
+ pci_dev_put(vfdev);
+
return 0;
err_free_entries:
@@ -831,10 +887,16 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
+ int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
+ pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
+ if (!pf->sriov_irq_bm)
+ return -ENOMEM;
+ pf->sriov_irq_size = total_vectors;
+
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -893,6 +955,7 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
+ bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -957,6 +1020,175 @@ static int ice_check_sriov_allowed(struct ice_pf *pf)
}
/**
+ * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs
+ * @pdev: pointer to pci_dev struct
+ *
+ * The function is called via sysfs ops
+ */
+u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
+}
+
+static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
+{
+ if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
+ return -ENOMEM;
+
+ pf->sriov_base_vector -= move;
+ return 0;
+}
+
+static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
+{
+ u16 vf_ids[ICE_MAX_SRIOV_VFS];
+ struct ice_vf *tmp_vf;
+ int to_remap = 0, bkt;
+
+ /* For better irq usage, try to remap irqs of VFs
+ * that aren't running yet
+ */
+ ice_for_each_vf(pf, bkt, tmp_vf) {
+ /* skip VF which is changing the number of MSI-X */
+ if (restricted_id == tmp_vf->vf_id ||
+ test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
+ continue;
+
+ ice_dis_vf_mappings(tmp_vf);
+ ice_sriov_free_irqs(pf, tmp_vf);
+
+ vf_ids[to_remap] = tmp_vf->vf_id;
+ to_remap += 1;
+ }
+
+ for (int i = 0; i < to_remap; i++) {
+ tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
+ if (!tmp_vf)
+ continue;
+
+ tmp_vf->first_vector_idx =
+ ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ /* there is no need to rebuild VSI as we are only changing the
+ * vector indexes not amount of MSI-X or queues
+ */
+ ice_ena_vf_mappings(tmp_vf);
+ ice_put_vf(tmp_vf);
+ }
+}
+
+/**
+ * ice_sriov_set_msix_vec_count - set MSI-X vector count on a VF
+ * @vf_dev: pointer to pci_dev struct of VF device
+ * @msix_vec_count: new value for MSI-X amount on this VF
+ *
+ * Set requested MSI-X, queues and registers for @vf_dev.
+ *
+ * First do some sanity checks, e.g. whether there are any VFs and whether
+ * the new value is valid. Then disable the old mapping (MSI-X and queue
+ * registers), change the MSI-X count and queues, rebuild the VSI and enable
+ * the new mapping.
+ *
+ * If possible (no driver bound to the VF), also try to remap the other VFs
+ * to linearize the irq register usage.
+ */
+int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
+{
+ struct pci_dev *pdev = pci_physfn(vf_dev);
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ u16 prev_msix, prev_queues, queues;
+ bool needs_rebuild = false;
+ struct ice_vf *vf;
+ int id;
+
+ if (!ice_get_num_vfs(pf))
+ return -ENOENT;
+
+ if (!msix_vec_count)
+ return 0;
+
+ queues = msix_vec_count;
+ /* add 1 MSI-X for OICR */
+ msix_vec_count += 1;
+
+ if (queues > min(ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf)))
+ return -EINVAL;
+
+ if (msix_vec_count < ICE_MIN_INTR_PER_VF)
+ return -EINVAL;
+
+ /* Translate the VF's PCI function number into the driver's VF id */
+ for (id = 0; id < pci_num_vf(pdev); id++) {
+ if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
+ break;
+ }
+
+ if (id == pci_num_vf(pdev))
+ return -ENOENT;
+
+ vf = ice_get_vf_by_id(pf, id);
+
+ if (!vf)
+ return -ENOENT;
+
+ prev_msix = vf->num_msix;
+ prev_queues = vf->num_vf_qs;
+
+ if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
+ ice_put_vf(vf);
+ return -ENOSPC;
+ }
+
+ ice_dis_vf_mappings(vf);
+ ice_sriov_free_irqs(pf, vf);
+
+ /* Remap all VFs besides the one that is being configured now */
+ ice_sriov_remap_vectors(pf, vf->vf_id);
+
+ vf->num_msix = msix_vec_count;
+ vf->num_vf_qs = queues;
+ vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0)
+ goto unroll;
+
+ ice_vf_vsi_release(vf);
+ if (vf->vf_ops->create_vsi(vf)) {
+ /* Try to rebuild with previous values */
+ needs_rebuild = true;
+ goto unroll;
+ }
+
+ dev_info(ice_pf_to_dev(pf),
+ "Changing VF %d resources to %d vectors and %d queues\n",
+ vf->vf_id, vf->num_msix, vf->num_vf_qs);
+
+ ice_ena_vf_mappings(vf);
+ ice_put_vf(vf);
+
+ return 0;
+
+unroll:
+ dev_info(ice_pf_to_dev(pf),
+ "Can't set %d vectors on VF %d, falling back to %d\n",
+ vf->num_msix, vf->vf_id, prev_msix);
+
+ vf->num_msix = prev_msix;
+ vf->num_vf_qs = prev_queues;
+ vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0)
+ return -EINVAL;
+
+ if (needs_rebuild)
+ vf->vf_ops->create_vsi(vf);
+
+ ice_ena_vf_mappings(vf);
+ ice_put_vf(vf);
+
+ return -EINVAL;
+}
+
+/**
* ice_sriov_configure - Enable or change number of VFs via sysfs
* @pdev: pointer to a pci_dev structure
* @num_vfs: number of VFs to allocate or 0 to free VFs
@@ -1709,31 +1941,16 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
/**
* ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
+ * @pf: pointer to the PF structure
*
* Called when recovering from a PF FLR to restore interrupt capability to
* the VFs.
*/
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
+ struct ice_vf *vf;
+ u32 bkt;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
+ ice_for_each_vf(pf, bkt, vf)
+ pci_restore_msi_state(vf->vfdev);
}
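
The bitmap introduced above (pf->sriov_irq_bm) tracks SR-IOV vectors from the top of the global MSI-X space downwards. The worked example below illustrates the index conversion done in ice_sriov_get_irqs()/ice_sriov_free_irqs(); the numbers are illustrative only, not taken from any particular device.

/* Illustration only: assume pf->sriov_irq_size == 64.
 *
 * VF0 requests 5 vectors:
 *   bitmap_find_next_zero_area() -> res = 0 (bits 0..4 get set)
 *   first_vector_idx = 64 - 0 - 5 = 59   -> global vectors 59..63
 *
 * VF1 requests 5 vectors:
 *   res = 5 (bits 5..9 get set)
 *   first_vector_idx = 64 - 5 - 5 = 54   -> global vectors 54..58
 *
 * Freeing VF0 inverts the mapping:
 *   bm_i = 64 - 59 - 5 = 0 -> bitmap_clear(sriov_irq_bm, 0, 5)
 *
 * Bit 0 of sriov_irq_bm therefore always corresponds to the highest global
 * irq index, and growing the SR-IOV pool only moves sriov_base_vector down.
 */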
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 346cb2666f3a..8488df38b586 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -33,7 +33,7 @@ int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+void ice_restore_all_vfs_msi_state(struct ice_pf *pf);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -60,6 +60,8 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
+u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
+int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -67,7 +69,7 @@ static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
@@ -142,5 +144,16 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
{
return -EOPNOTSUPP;
}
+
+static inline u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline int
+ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
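
The two exports above, ice_sriov_get_vf_total_msix() and ice_sriov_set_msix_vec_count(), are intended to back the PCI core's per-VF MSI-X sysfs attributes. Below is a minimal sketch of how they would be hooked into the PF's struct pci_driver; the fields other than the two new helpers are placeholders, not the driver's actual pci_driver definition.

/* Sketch: wiring the SR-IOV MSI-X helpers into the PF pci_driver so that
 * writes to a VF's sriov_vf_msix_count sysfs file reach
 * ice_sriov_set_msix_vec_count().
 */
static struct pci_driver example_ice_pci_driver = {
	.name			  = "ice",
	.sriov_configure	  = ice_sriov_configure,
	.sriov_get_vf_total_msix  = ice_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
};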
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 2f77b684ff76..ee19f3aa3d19 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
- u16 buf_len;
int status;
- buf_len = struct_size(sw_buf, elem, 1);
- sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
- if (!sw_buf)
- return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -1840,8 +1836,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
sw_buf->res_type =
cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = -EINVAL;
- goto ice_aq_alloc_free_vsi_list_exit;
+ return -EINVAL;
}
if (opc == ice_aqc_opc_free_res)
@@ -1849,16 +1844,14 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
if (status)
- goto ice_aq_alloc_free_vsi_list_exit;
+ return status;
if (opc == ice_aqc_opc_alloc_res) {
vsi_ele = &sw_buf->elem[0];
*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
}
-ice_aq_alloc_free_vsi_list_exit:
- devm_kfree(ice_hw_to_dev(hw), sw_buf);
- return status;
+ return 0;
}
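
This hunk, and several below, replace heap-allocated flexible-array command buffers with on-stack ones. A minimal sketch of the pattern, assuming a hypothetical struct foo with a trailing elem[] flexible array; DEFINE_FLEX() comes from <linux/overflow.h>, and foo_send() is an illustrative helper, not a driver function:

    /* Hypothetical command structure with a flexible array member. */
    struct foo {
            __le16 num_elems;
            struct foo_elem elem[];
    };

    /* On-stack, zero-initialized buffer sized for one trailing element. */
    DEFINE_FLEX(struct foo, buf, elem, 1);
    u16 buf_len = __struct_size(buf);       /* total size including elem[0] */

    buf->num_elems = cpu_to_le16(1);
    /* No kzalloc()/kfree() pair and no unwind label are needed. */
    return foo_send(hw, buf, buf_len);
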
/**
@@ -2088,15 +2081,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
int status;
- buf_len = struct_size(sw_buf, elem, 1);
- sw_buf = kzalloc(buf_len, GFP_KERNEL);
- if (!sw_buf)
- return -ENOMEM;
-
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
ICE_AQC_RES_TYPE_S) |
@@ -2105,7 +2093,6 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
ice_aqc_opc_alloc_res);
if (!status)
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
- kfree(sw_buf);
return status;
}
@@ -4434,28 +4421,19 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- /* Allocate resource */
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
- goto exit;
+ return status;
*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
-
-exit:
- kfree(buf);
return status;
}
@@ -4471,16 +4449,10 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- /* Free resource */
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
@@ -4490,7 +4462,6 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
if (status)
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
- kfree(buf);
return status;
}
@@ -4508,15 +4479,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(1);
if (shared)
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
@@ -4534,7 +4500,6 @@ int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
type, res_id, shared ? "SHARED" : "DEDICATED");
- kfree(buf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index c8322fb6f2b3..7e06373e14d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
if (xdp_res & ICE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 5e353b0cbe6f..a18ca0ff879f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
@@ -129,6 +129,7 @@ enum ice_set_fc_aq_failures {
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_E810,
+ ICE_MAC_E830,
ICE_MAC_GENERIC,
};
@@ -822,6 +823,13 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+/* PHY model */
+enum ice_phy_model {
+ ICE_PHY_UNSUP = -1,
+ ICE_PHY_E810 = 1,
+ ICE_PHY_E822,
+};
+
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
@@ -843,6 +851,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
+ enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
@@ -901,17 +910,20 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC 1
-#define ICE_MAX_QUAD 2
-#define ICE_NUM_QUAD_TYPE 2
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PHY_0_LAST_QUAD 1
-#define ICE_PORTS_PER_PHY 8
-#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+#define ICE_PHY_PER_NAC_E822 1
+#define ICE_MAX_QUAD 2
+#define ICE_QUADS_PER_PHY_E822 2
+#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PORTS_PER_PHY_E810 4
+#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
+ u32 pkg_seg_id;
+ u32 pkg_sign_type;
u32 active_track_id;
+ u8 pkg_has_signing_seg:1;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@@ -965,6 +977,7 @@ struct ice_hw {
DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
u8 dvm_ena;
u16 io_expander_handle;
+ u8 cgu_part_number;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 24e4f4d897b6..aca1f2ea5034 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -56,6 +56,8 @@ static void ice_release_vf(struct kref *ref)
{
struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+ pci_dev_put(vf->vfdev);
+
vf->vf_ops->free(vf);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 48fea6fa0362..93c774f2f437 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -72,7 +72,7 @@ struct ice_vfs {
struct mutex table_lock; /* Lock for protecting the hash table */
u16 num_supported; /* max supported VFs on this PF */
u16 num_qps_per; /* number of queue pairs per VF */
- u16 num_msix_per; /* number of MSI-X vectors per VF */
+ u16 num_msix_per; /* default MSI-X vectors per VF */
unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
};
@@ -82,7 +82,7 @@ struct ice_vf {
struct rcu_head rcu;
struct kref refcnt;
struct ice_pf *pf;
-
+ struct pci_dev *vfdev;
/* Used during virtchnl message handling and NDO ops against the VF
* that will trigger a VFR
*/
@@ -123,6 +123,9 @@ struct ice_vf {
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
+ u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
+#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
+#define ICE_OUTER_VLAN_STRIP_ENA BIT(1)
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
@@ -133,6 +136,8 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
+
+ u16 num_msix; /* num of MSI-X configured on this VF */
};
/* Flags for controlling behavior of ice_reset_vf */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index db97353efd06..cdf17b1e2f25 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -486,6 +486,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC;
+
if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
@@ -498,7 +501,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = vf->pf->vfs.num_msix_per;
+ vfres->max_vectors = vf->num_msix;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
@@ -1621,6 +1624,15 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
@@ -1666,6 +1678,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.crc_disable)
+ vsi->rx_rings[q_idx]->flags |=
+ ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[q_idx]->flags &=
+ ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
if (qpi->rxq.databuffer_size != 0 &&
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
@@ -2411,6 +2430,21 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
}
/**
+ * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC strip is disabled or not
+ * @vsi: pointer to the VF VSI info
+ */
+static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
+{
+ unsigned int i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
+ return true;
+
+ return false;
+}
+
+/**
* ice_vc_ena_vlan_stripping
* @vf: pointer to the VF info
*
@@ -2439,6 +2473,8 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ else
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
@@ -2474,6 +2510,8 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
if (vsi->inner_vlan_ops.dis_stripping(vsi))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ else
+ vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
@@ -2651,6 +2689,8 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ vf->vlan_strip_ena = 0;
+
if (!vsi)
return -EINVAL;
@@ -2660,10 +2700,16 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
return 0;
- if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
- else
- return vsi->inner_vlan_ops.dis_stripping(vsi);
+ if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ int err;
+
+ err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ if (!err)
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
+ return err;
+ }
+
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
}
static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
@@ -3437,6 +3483,11 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto out;
+ }
+
ethertype_setting = strip_msg->outer_ethertype_setting;
if (ethertype_setting) {
if (ice_vc_ena_vlan_offload(vsi,
@@ -3457,6 +3508,8 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
* enabled, is extracted in L2TAG1.
*/
ice_vsi_update_l2tsel(vsi, l2tsel);
+
+ vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
}
}
@@ -3468,6 +3521,9 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ethertype_setting)
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
+
out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0);
@@ -3529,6 +3585,8 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
* in L2TAG1.
*/
ice_vsi_update_l2tsel(vsi, l2tsel);
+
+ vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
}
}
@@ -3538,6 +3596,9 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ethertype_setting)
+ vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
+
out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index daa6a1e894cf..24b23b7ef04a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021, Intel Corporation. */
+/* Copyright (C) 2021-2023, Intel Corporation. */
#include "ice.h"
#include "ice_base.h"
@@ -1422,8 +1422,8 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
*/
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
+ u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
struct ice_vsi *vf_vsi;
- u32 fd_size, fd_cnt;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -1442,12 +1442,25 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf)
fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
- dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
- vf->vf_id,
- (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
- (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
- (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
- (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
+ fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
+ fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
+ fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
+ break;
+ case ICE_MAC_E810:
+ default:
+ fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
+ fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
+ fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
+ fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
+ }
+
+ dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
+ vf->vf_id, fd_size_g, fd_size_b);
+ dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
+ vf->vf_id, fd_cnt_g, fd_cnt_b);
}
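
The hunk above replaces open-coded mask-and-shift extraction with FIELD_GET() from <linux/bitfield.h>, which derives the shift from the mask at compile time. A small illustration with a hypothetical mask (the register read mirrors the code above):

    #include <linux/bitfield.h>

    #define EXAMPLE_GCNT_M	GENMASK(12, 0)	/* hypothetical field mask */

    u32 fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
    /* Equivalent to (fd_cnt & EXAMPLE_GCNT_M) >> (lowest set bit of the mask). */
    u32 fd_cnt_g = FIELD_GET(EXAMPLE_GCNT_M, fd_cnt);
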
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 2a3f0834e139..99954508184f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -217,21 +217,16 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- u16 size;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
return -EINVAL;
- size = struct_size(qg_buf, txqs, 1);
- qg_buf = kzalloc(size, GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
qg_buf->num_txqs = 1;
tx_ring = vsi->tx_rings[q_idx];
@@ -240,7 +235,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
if (err)
- goto free_buf;
+ return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
@@ -249,29 +244,28 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
if (err)
- goto free_buf;
+ return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
if (err)
- goto free_buf;
+ return err;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
- goto free_buf;
+ return err;
clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-free_buf:
- kfree(qg_buf);
- return err;
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
new file mode 100644
index 000000000000..6844ead2f3ac
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023 Intel Corporation
+
+# Makefile for Intel(R) Infrastructure Data Path Function Linux Driver
+
+obj-$(CONFIG_IDPF) += idpf.o
+
+idpf-y := \
+ idpf_controlq.o \
+ idpf_controlq_setup.o \
+ idpf_dev.o \
+ idpf_ethtool.o \
+ idpf_lib.o \
+ idpf_main.o \
+ idpf_singleq_txrx.o \
+ idpf_txrx.o \
+ idpf_virtchnl.o \
+ idpf_vf_dev.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
new file mode 100644
index 000000000000..bee73353b56a
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -0,0 +1,968 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_H_
+#define _IDPF_H_
+
+/* Forward declaration */
+struct idpf_adapter;
+struct idpf_vport;
+struct idpf_vport_max_q;
+
+#include <net/pkt_sched.h>
+#include <linux/aer.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/bitfield.h>
+#include <linux/sctp.h>
+#include <linux/ethtool.h>
+#include <net/gro.h>
+#include <linux/dim.h>
+
+#include "virtchnl2.h"
+#include "idpf_lan_txrx.h"
+#include "idpf_txrx.h"
+#include "idpf_controlq.h"
+
+#define GETMAXVAL(num_bits) GENMASK((num_bits) - 1, 0)
+
+#define IDPF_NO_FREE_SLOT 0xffff
+
+/* Default Mailbox settings */
+#define IDPF_NUM_FILTERS_PER_MSG 20
+#define IDPF_NUM_DFLT_MBX_Q 2 /* includes both TX and RX */
+#define IDPF_DFLT_MBX_Q_LEN 64
+#define IDPF_DFLT_MBX_ID -1
+/* maximum number of times to try before resetting mailbox */
+#define IDPF_MB_MAX_ERR 20
+#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
+ ((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
+#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
+
+#define IDPF_MAX_WAIT 500
+
+/* available message levels */
+#define IDPF_AVAIL_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+#define IDPF_DIM_PROFILE_SLOTS 5
+
+#define IDPF_VIRTCHNL_VERSION_MAJOR VIRTCHNL2_VERSION_MAJOR_2
+#define IDPF_VIRTCHNL_VERSION_MINOR VIRTCHNL2_VERSION_MINOR_0
+
+/**
+ * struct idpf_mac_filter
+ * @list: list member field
+ * @macaddr: MAC address
+ * @remove: filter should be removed (virtchnl)
+ * @add: filter should be added (virtchnl)
+ */
+struct idpf_mac_filter {
+ struct list_head list;
+ u8 macaddr[ETH_ALEN];
+ bool remove;
+ bool add;
+};
+
+/**
+ * enum idpf_state - State machine to handle bring up
+ * @__IDPF_STARTUP: Start the state machine
+ * @__IDPF_VER_CHECK: Negotiate virtchnl version
+ * @__IDPF_GET_CAPS: Negotiate capabilities
+ * @__IDPF_INIT_SW: Init based on given capabilities
+ * @__IDPF_STATE_LAST: Must be last, used to determine size
+ */
+enum idpf_state {
+ __IDPF_STARTUP,
+ __IDPF_VER_CHECK,
+ __IDPF_GET_CAPS,
+ __IDPF_INIT_SW,
+ __IDPF_STATE_LAST,
+};
+
+/**
+ * enum idpf_flags - Hard reset causes.
+ * @IDPF_HR_FUNC_RESET: Hard reset when TxRx timeout
+ * @IDPF_HR_DRV_LOAD: Set on driver load for a clean HW
+ * @IDPF_HR_RESET_IN_PROG: Reset in progress
+ * @IDPF_REMOVE_IN_PROG: Driver remove in progress
+ * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_FLAGS_NBITS: Must be last
+ */
+enum idpf_flags {
+ IDPF_HR_FUNC_RESET,
+ IDPF_HR_DRV_LOAD,
+ IDPF_HR_RESET_IN_PROG,
+ IDPF_REMOVE_IN_PROG,
+ IDPF_MB_INTR_MODE,
+ IDPF_FLAGS_NBITS,
+};
+
+/**
+ * enum idpf_cap_field - Offsets into capabilities struct for specific caps
+ * @IDPF_BASE_CAPS: generic base capabilities
+ * @IDPF_CSUM_CAPS: checksum offload capabilities
+ * @IDPF_SEG_CAPS: segmentation offload capabilities
+ * @IDPF_RSS_CAPS: RSS offload capabilities
+ * @IDPF_HSPLIT_CAPS: Header split capabilities
+ * @IDPF_RSC_CAPS: RSC offload capabilities
+ * @IDPF_OTHER_CAPS: miscellaneous offloads
+ *
+ * Used when checking for a specific capability flag. Since different
+ * capability sets are not mutually exclusive numerically, the caller must
+ * specify which type of capability they are checking for.
+ */
+enum idpf_cap_field {
+ IDPF_BASE_CAPS = -1,
+ IDPF_CSUM_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ csum_caps),
+ IDPF_SEG_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ seg_caps),
+ IDPF_RSS_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ rss_caps),
+ IDPF_HSPLIT_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ hsplit_caps),
+ IDPF_RSC_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ rsc_caps),
+ IDPF_OTHER_CAPS = offsetof(struct virtchnl2_get_capabilities,
+ other_caps),
+};
+
+/**
+ * enum idpf_vport_state - Current vport state
+ * @__IDPF_VPORT_DOWN: Vport is down
+ * @__IDPF_VPORT_UP: Vport is up
+ * @__IDPF_VPORT_STATE_LAST: Must be last, number of states
+ */
+enum idpf_vport_state {
+ __IDPF_VPORT_DOWN,
+ __IDPF_VPORT_UP,
+ __IDPF_VPORT_STATE_LAST,
+};
+
+/**
+ * struct idpf_netdev_priv - Struct to store vport back pointer
+ * @adapter: Adapter back pointer
+ * @vport: Vport back pointer
+ * @vport_id: Vport identifier
+ * @vport_idx: Relative vport index
+ * @state: See enum idpf_vport_state
+ * @netstats: Packet and byte stats
+ * @stats_lock: Lock to protect stats update
+ */
+struct idpf_netdev_priv {
+ struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ u32 vport_id;
+ u16 vport_idx;
+ enum idpf_vport_state state;
+ struct rtnl_link_stats64 netstats;
+ spinlock_t stats_lock;
+};
+
+/**
+ * struct idpf_reset_reg - Reset register offsets/masks
+ * @rstat: Reset status register
+ * @rstat_m: Reset status mask
+ */
+struct idpf_reset_reg {
+ void __iomem *rstat;
+ u32 rstat_m;
+};
+
+/**
+ * struct idpf_vport_max_q - Queue limits
+ * @max_rxq: Maximum number of RX queues supported
+ * @max_txq: Maximum number of TX queues supported
+ * @max_bufq: In splitq, maximum number of buffer queues supported
+ * @max_complq: In splitq, maximum number of completion queues supported
+ */
+struct idpf_vport_max_q {
+ u16 max_rxq;
+ u16 max_txq;
+ u16 max_bufq;
+ u16 max_complq;
+};
+
+/**
+ * struct idpf_reg_ops - Device specific register operation function pointers
+ * @ctlq_reg_init: Mailbox control queue register initialization
+ * @intr_reg_init: Traffic interrupt register initialization
+ * @mb_intr_reg_init: Mailbox interrupt register initialization
+ * @reset_reg_init: Reset register initialization
+ * @trigger_reset: Trigger a reset to occur
+ */
+struct idpf_reg_ops {
+ void (*ctlq_reg_init)(struct idpf_ctlq_create_info *cq);
+ int (*intr_reg_init)(struct idpf_vport *vport);
+ void (*mb_intr_reg_init)(struct idpf_adapter *adapter);
+ void (*reset_reg_init)(struct idpf_adapter *adapter);
+ void (*trigger_reset)(struct idpf_adapter *adapter,
+ enum idpf_flags trig_cause);
+};
+
+/**
+ * struct idpf_dev_ops - Device specific operations
+ * @reg_ops: Register operations
+ */
+struct idpf_dev_ops {
+ struct idpf_reg_ops reg_ops;
+};
+
+/* These macros allow us to generate an enum and a matching char * array of
+ * stringified enums that are always in sync. Checkpatch issues a bogus warning
+ * about this being a complex macro, but that warning is wrong: these are never
+ * used as a statement and are only used to define the enum and array.
+ */
+#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
+ STATE(IDPF_VC_CREATE_VPORT) \
+ STATE(IDPF_VC_CREATE_VPORT_ERR) \
+ STATE(IDPF_VC_ENA_VPORT) \
+ STATE(IDPF_VC_ENA_VPORT_ERR) \
+ STATE(IDPF_VC_DIS_VPORT) \
+ STATE(IDPF_VC_DIS_VPORT_ERR) \
+ STATE(IDPF_VC_DESTROY_VPORT) \
+ STATE(IDPF_VC_DESTROY_VPORT_ERR) \
+ STATE(IDPF_VC_CONFIG_TXQ) \
+ STATE(IDPF_VC_CONFIG_TXQ_ERR) \
+ STATE(IDPF_VC_CONFIG_RXQ) \
+ STATE(IDPF_VC_CONFIG_RXQ_ERR) \
+ STATE(IDPF_VC_ENA_QUEUES) \
+ STATE(IDPF_VC_ENA_QUEUES_ERR) \
+ STATE(IDPF_VC_DIS_QUEUES) \
+ STATE(IDPF_VC_DIS_QUEUES_ERR) \
+ STATE(IDPF_VC_MAP_IRQ) \
+ STATE(IDPF_VC_MAP_IRQ_ERR) \
+ STATE(IDPF_VC_UNMAP_IRQ) \
+ STATE(IDPF_VC_UNMAP_IRQ_ERR) \
+ STATE(IDPF_VC_ADD_QUEUES) \
+ STATE(IDPF_VC_ADD_QUEUES_ERR) \
+ STATE(IDPF_VC_DEL_QUEUES) \
+ STATE(IDPF_VC_DEL_QUEUES_ERR) \
+ STATE(IDPF_VC_ALLOC_VECTORS) \
+ STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
+ STATE(IDPF_VC_DEALLOC_VECTORS) \
+ STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
+ STATE(IDPF_VC_SET_SRIOV_VFS) \
+ STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
+ STATE(IDPF_VC_GET_RSS_LUT) \
+ STATE(IDPF_VC_GET_RSS_LUT_ERR) \
+ STATE(IDPF_VC_SET_RSS_LUT) \
+ STATE(IDPF_VC_SET_RSS_LUT_ERR) \
+ STATE(IDPF_VC_GET_RSS_KEY) \
+ STATE(IDPF_VC_GET_RSS_KEY_ERR) \
+ STATE(IDPF_VC_SET_RSS_KEY) \
+ STATE(IDPF_VC_SET_RSS_KEY_ERR) \
+ STATE(IDPF_VC_GET_STATS) \
+ STATE(IDPF_VC_GET_STATS_ERR) \
+ STATE(IDPF_VC_ADD_MAC_ADDR) \
+ STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
+ STATE(IDPF_VC_DEL_MAC_ADDR) \
+ STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
+ STATE(IDPF_VC_GET_PTYPE_INFO) \
+ STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
+ STATE(IDPF_VC_LOOPBACK_STATE) \
+ STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
+ STATE(IDPF_VC_NBITS)
+
+#define IDPF_GEN_ENUM(ENUM) ENUM,
+#define IDPF_GEN_STRING(STRING) #STRING,
+
+enum idpf_vport_vc_state {
+ IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
+};
+
+extern const char * const idpf_vport_vc_state_str[];
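
The enum above is generated with IDPF_GEN_ENUM; the matching string table declared here is expected to be defined once in a C file by expanding the same list with IDPF_GEN_STRING, roughly as sketched below (illustrative placement):

    /* Expands to "IDPF_VC_CREATE_VPORT", "IDPF_VC_CREATE_VPORT_ERR", ... and
     * stays in sync with enum idpf_vport_vc_state by construction.
     */
    const char * const idpf_vport_vc_state_str[] = {
            IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
    };
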
+
+/**
+ * enum idpf_vport_reset_cause - Vport soft reset causes
+ * @IDPF_SR_Q_CHANGE: Soft reset queue change
+ * @IDPF_SR_Q_DESC_CHANGE: Soft reset descriptor change
+ * @IDPF_SR_MTU_CHANGE: Soft reset MTU change
+ * @IDPF_SR_RSC_CHANGE: Soft reset RSC change
+ */
+enum idpf_vport_reset_cause {
+ IDPF_SR_Q_CHANGE,
+ IDPF_SR_Q_DESC_CHANGE,
+ IDPF_SR_MTU_CHANGE,
+ IDPF_SR_RSC_CHANGE,
+};
+
+/**
+ * enum idpf_vport_flags - Vport flags
+ * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
+ * @IDPF_VPORT_SW_MARKER: Indicate that processing of TX pipe drain software
+ * marker packets is done
+ * @IDPF_VPORT_FLAGS_NBITS: Must be last
+ */
+enum idpf_vport_flags {
+ IDPF_VPORT_DEL_QUEUES,
+ IDPF_VPORT_SW_MARKER,
+ IDPF_VPORT_FLAGS_NBITS,
+};
+
+struct idpf_port_stats {
+ struct u64_stats_sync stats_sync;
+ u64_stats_t rx_hw_csum_err;
+ u64_stats_t rx_hsplit;
+ u64_stats_t rx_hsplit_hbo;
+ u64_stats_t rx_bad_descs;
+ u64_stats_t tx_linearize;
+ u64_stats_t tx_busy;
+ u64_stats_t tx_drops;
+ u64_stats_t tx_dma_map_errs;
+ struct virtchnl2_vport_stats vport_stats;
+};
+
+/**
+ * struct idpf_vport - Handle for netdevices and queue resources
+ * @num_txq: Number of allocated TX queues
+ * @num_complq: Number of allocated completion queues
+ * @txq_desc_count: TX queue descriptor count
+ * @complq_desc_count: Completion queue descriptor count
+ * @compln_clean_budget: Work budget for completion clean
+ * @num_txq_grp: Number of TX queue groups
+ * @txq_grps: Array of TX queue groups
+ * @txq_model: Split queue or single queue queuing model
+ * @txqs: Used only in hotpath to get to the right queue very fast
+ * @crc_enable: Enable CRC insertion offload
+ * @num_rxq: Number of allocated RX queues
+ * @num_bufq: Number of allocated buffer queues
+ * @rxq_desc_count: RX queue descriptor count. *MUST* have enough descriptors
+ * to complete all buffer descriptors for all buffer queues in
+ * the worst case.
+ * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
+ * @bufq_desc_count: Buffer queue descriptor count
+ * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
+ * @num_rxq_grp: Number of RX queue groups
+ * @rxq_grps: Array of RX queue groups. Number of groups * number of RX queues
+ * per group will yield total number of RX queues.
+ * @rxq_model: Split queue or single queue queuing model
+ * @rx_ptype_lkup: Lookup table for ptypes on RX
+ * @adapter: back pointer to associated adapter
+ * @netdev: Associated net_device. Each vport should have one and only one
+ * associated netdev.
+ * @flags: See enum idpf_vport_flags
+ * @vport_type: Default SRIOV, SIOV, etc.
+ * @vport_id: Device given vport identifier
+ * @idx: Software index in adapter vports struct
+ * @default_vport: Use this vport if one isn't specified
+ * @base_rxd: True if the driver should use base descriptors instead of flex
+ * @num_q_vectors: Number of IRQ vectors allocated
+ * @q_vectors: Array of queue vectors
+ * @q_vector_idxs: Starting index of queue vectors
+ * @max_mtu: device given max possible MTU
+ * @default_mac_addr: device will give a default MAC to use
+ * @rx_itr_profile: RX profiles for Dynamic Interrupt Moderation
+ * @tx_itr_profile: TX profiles for Dynamic Interrupt Moderation
+ * @port_stats: per port csum, header split, and other offload stats
+ * @link_up: True if link is up
+ * @link_speed_mbps: Link speed in mbps
+ * @vc_msg: Virtchnl message buffer
+ * @vc_state: Virtchnl message state
+ * @vchnl_wq: Wait queue for virtchnl messages
+ * @sw_marker_wq: workqueue for marker packets
+ * @vc_buf_lock: Lock to protect virtchnl buffer
+ */
+struct idpf_vport {
+ u16 num_txq;
+ u16 num_complq;
+ u32 txq_desc_count;
+ u32 complq_desc_count;
+ u32 compln_clean_budget;
+ u16 num_txq_grp;
+ struct idpf_txq_group *txq_grps;
+ u32 txq_model;
+ struct idpf_queue **txqs;
+ bool crc_enable;
+
+ u16 num_rxq;
+ u16 num_bufq;
+ u32 rxq_desc_count;
+ u8 num_bufqs_per_qgrp;
+ u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
+ u16 num_rxq_grp;
+ struct idpf_rxq_group *rxq_grps;
+ u32 rxq_model;
+ struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+
+ struct idpf_adapter *adapter;
+ struct net_device *netdev;
+ DECLARE_BITMAP(flags, IDPF_VPORT_FLAGS_NBITS);
+ u16 vport_type;
+ u32 vport_id;
+ u16 idx;
+ bool default_vport;
+ bool base_rxd;
+
+ u16 num_q_vectors;
+ struct idpf_q_vector *q_vectors;
+ u16 *q_vector_idxs;
+ u16 max_mtu;
+ u8 default_mac_addr[ETH_ALEN];
+ u16 rx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
+ u16 tx_itr_profile[IDPF_DIM_PROFILE_SLOTS];
+ struct idpf_port_stats port_stats;
+
+ bool link_up;
+ u32 link_speed_mbps;
+
+ char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
+ DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
+
+ wait_queue_head_t vchnl_wq;
+ wait_queue_head_t sw_marker_wq;
+ struct mutex vc_buf_lock;
+};
+
+/**
+ * enum idpf_user_flags
+ * @__IDPF_PROMISC_UC: Unicast promiscuous mode
+ * @__IDPF_PROMISC_MC: Multicast promiscuous mode
+ * @__IDPF_USER_FLAGS_NBITS: Must be last
+ */
+enum idpf_user_flags {
+ __IDPF_PROMISC_UC = 32,
+ __IDPF_PROMISC_MC,
+
+ __IDPF_USER_FLAGS_NBITS,
+};
+
+/**
+ * struct idpf_rss_data - Associated RSS data
+ * @rss_key_size: Size of RSS hash key
+ * @rss_key: RSS hash key
+ * @rss_lut_size: Size of RSS lookup table
+ * @rss_lut: RSS lookup table
+ * @cached_lut: Used to restore previously init RSS lut
+ */
+struct idpf_rss_data {
+ u16 rss_key_size;
+ u8 *rss_key;
+ u16 rss_lut_size;
+ u32 *rss_lut;
+ u32 *cached_lut;
+};
+
+/**
+ * struct idpf_vport_user_config_data - User defined configuration values for
+ * each vport.
+ * @rss_data: See struct idpf_rss_data
+ * @num_req_tx_qs: Number of user requested TX queues through ethtool
+ * @num_req_rx_qs: Number of user requested RX queues through ethtool
+ * @num_req_txq_desc: Number of user requested TX queue descriptors through
+ * ethtool
+ * @num_req_rxq_desc: Number of user requested RX queue descriptors through
+ * ethtool
+ * @user_flags: User toggled config flags
+ * @mac_filter_list: List of MAC filters
+ *
+ * Used to restore configuration after a reset as the vport will get wiped.
+ */
+struct idpf_vport_user_config_data {
+ struct idpf_rss_data rss_data;
+ u16 num_req_tx_qs;
+ u16 num_req_rx_qs;
+ u32 num_req_txq_desc;
+ u32 num_req_rxq_desc;
+ DECLARE_BITMAP(user_flags, __IDPF_USER_FLAGS_NBITS);
+ struct list_head mac_filter_list;
+};
+
+/**
+ * enum idpf_vport_config_flags - Vport config flags
+ * @IDPF_VPORT_REG_NETDEV: Register netdev
+ * @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
+ * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
+ * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
+ * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
+ */
+enum idpf_vport_config_flags {
+ IDPF_VPORT_REG_NETDEV,
+ IDPF_VPORT_UP_REQUESTED,
+ IDPF_VPORT_ADD_MAC_REQ,
+ IDPF_VPORT_DEL_MAC_REQ,
+ IDPF_VPORT_CONFIG_FLAGS_NBITS,
+};
+
+/**
+ * struct idpf_avail_queue_info
+ * @avail_rxq: Available RX queues
+ * @avail_txq: Available TX queues
+ * @avail_bufq: Available buffer queues
+ * @avail_complq: Available completion queues
+ *
+ * Maintain total queues available after allocating max queues to each vport.
+ */
+struct idpf_avail_queue_info {
+ u16 avail_rxq;
+ u16 avail_txq;
+ u16 avail_bufq;
+ u16 avail_complq;
+};
+
+/**
+ * struct idpf_vector_info - Utility structure to pass function arguments as a
+ * structure
+ * @num_req_vecs: Vectors required based on the number of queues updated by the
+ * user via ethtool
+ * @num_curr_vecs: Current number of vectors, must be >= @num_req_vecs
+ * @index: Relative starting index for vectors
+ * @default_vport: Vectors are for default vport
+ */
+struct idpf_vector_info {
+ u16 num_req_vecs;
+ u16 num_curr_vecs;
+ u16 index;
+ bool default_vport;
+};
+
+/**
+ * struct idpf_vector_lifo - Stack to maintain vector indexes used for vector
+ * distribution algorithm
+ * @top: Points to stack top i.e. next available vector index
+ * @base: Always points to start of the free pool
+ * @size: Total size of the vector stack
+ * @vec_idx: Array to store all the vector indexes
+ *
+ * Vector stack maintains all the relative vector indexes at the *adapter*
+ * level. This stack is divided into two parts: the 'default pool' and the
+ * 'free pool'. The vector distribution algorithm gives priority to default
+ * vports in a way that at least IDPF_MIN_Q_VEC vectors are allocated per
+ * default vport, and the relative vector indexes for those are maintained in
+ * the default pool. The free pool contains all the unallocated vector
+ * indexes, which can be allocated on demand. The mailbox vector index is
+ * maintained in the default pool of the stack.
+ */
+struct idpf_vector_lifo {
+ u16 top;
+ u16 base;
+ u16 size;
+ u16 *vec_idx;
+};
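
A minimal sketch of the LIFO semantics described above, using hypothetical push/pop helpers (the driver's actual helpers and error codes may differ):

    /* Pop the next free relative vector index, or -ENOSPC if the pool is empty. */
    static int idpf_vec_lifo_pop(struct idpf_vector_lifo *stack)
    {
            if (stack->top == stack->size)
                    return -ENOSPC;

            return stack->vec_idx[stack->top++];
    }

    /* Push a released vector index back so it can be handed out again. */
    static void idpf_vec_lifo_push(struct idpf_vector_lifo *stack, u16 vec_idx)
    {
            if (WARN_ON(!stack->top))
                    return;

            stack->vec_idx[--stack->top] = vec_idx;
    }
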
+
+/**
+ * struct idpf_vport_config - Vport configuration data
+ * @user_config: see struct idpf_vport_user_config_data
+ * @max_q: Maximum possible queues
+ * @req_qs_chunks: Queue chunk data for requested queues
+ * @mac_filter_list_lock: Lock to protect mac filters
+ * @flags: See enum idpf_vport_config_flags
+ */
+struct idpf_vport_config {
+ struct idpf_vport_user_config_data user_config;
+ struct idpf_vport_max_q max_q;
+ void *req_qs_chunks;
+ spinlock_t mac_filter_list_lock;
+ DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
+};
+
+/**
+ * struct idpf_adapter - Device data struct generated on probe
+ * @pdev: PCI device struct given on probe
+ * @virt_ver_maj: Virtchnl version major
+ * @virt_ver_min: Virtchnl version minor
+ * @msg_enable: Debug message level enabled
+ * @mb_wait_count: Number of times mailbox initialization has been attempted
+ * @state: Init state machine
+ * @flags: See enum idpf_flags
+ * @reset_reg: See struct idpf_reset_reg
+ * @hw: Device access data
+ * @num_req_msix: Requested number of MSIX vectors
+ * @num_avail_msix: Available number of MSIX vectors
+ * @num_msix_entries: Number of entries in MSIX table
+ * @msix_entries: MSIX table
+ * @req_vec_chunks: Requested vector chunk data
+ * @mb_vector: Mailbox vector data
+ * @vector_stack: Stack to store the msix vector indexes
+ * @irq_mb_handler: Handler for hard interrupt for mailbox
+ * @tx_timeout_count: Number of TX timeouts that have occurred
+ * @avail_queues: Device given queue limits
+ * @vports: Array to store vports created by the driver
+ * @netdevs: Associated Vport netdevs
+ * @vport_params_reqd: Vport params requested
+ * @vport_params_recvd: Vport params received
+ * @vport_ids: Array of device given vport identifiers
+ * @vport_config: Vport config parameters
+ * @max_vports: Maximum vports that can be allocated
+ * @num_alloc_vports: Current number of vports allocated
+ * @next_vport: Next free slot in pf->vport[] - 0-based!
+ * @init_task: Initialization task
+ * @init_wq: Workqueue for initialization task
+ * @serv_task: Periodically recurring maintenance task
+ * @serv_wq: Workqueue for service task
+ * @mbx_task: Task to handle mailbox interrupts
+ * @mbx_wq: Workqueue for mailbox responses
+ * @vc_event_task: Task to handle out of band virtchnl event notifications
+ * @vc_event_wq: Workqueue for virtchnl events
+ * @stats_task: Periodic statistics retrieval task
+ * @stats_wq: Workqueue for statistics task
+ * @caps: Negotiated capabilities with device
+ * @vchnl_wq: Wait queue for virtchnl messages
+ * @vc_state: Virtchnl message state
+ * @vc_msg: Virtchnl message buffer
+ * @dev_ops: See idpf_dev_ops
+ * @num_vfs: Number of VFs allocated through sysfs. The PF does not talk to
+ * the VFs directly; this count is only used to initialize them
+ * @crc_enable: Enable CRC insertion offload
+ * @req_tx_splitq: TX split or single queue model to request
+ * @req_rx_splitq: RX split or single queue model to request
+ * @vport_ctrl_lock: Lock to protect the vport control flow
+ * @vector_lock: Lock to protect vector distribution
+ * @queue_lock: Lock to protect queue distribution
+ * @vc_buf_lock: Lock to protect virtchnl buffer
+ */
+struct idpf_adapter {
+ struct pci_dev *pdev;
+ u32 virt_ver_maj;
+ u32 virt_ver_min;
+
+ u32 msg_enable;
+ u32 mb_wait_count;
+ enum idpf_state state;
+ DECLARE_BITMAP(flags, IDPF_FLAGS_NBITS);
+ struct idpf_reset_reg reset_reg;
+ struct idpf_hw hw;
+ u16 num_req_msix;
+ u16 num_avail_msix;
+ u16 num_msix_entries;
+ struct msix_entry *msix_entries;
+ struct virtchnl2_alloc_vectors *req_vec_chunks;
+ struct idpf_q_vector mb_vector;
+ struct idpf_vector_lifo vector_stack;
+ irqreturn_t (*irq_mb_handler)(int irq, void *data);
+
+ u32 tx_timeout_count;
+ struct idpf_avail_queue_info avail_queues;
+ struct idpf_vport **vports;
+ struct net_device **netdevs;
+ struct virtchnl2_create_vport **vport_params_reqd;
+ struct virtchnl2_create_vport **vport_params_recvd;
+ u32 *vport_ids;
+
+ struct idpf_vport_config **vport_config;
+ u16 max_vports;
+ u16 num_alloc_vports;
+ u16 next_vport;
+
+ struct delayed_work init_task;
+ struct workqueue_struct *init_wq;
+ struct delayed_work serv_task;
+ struct workqueue_struct *serv_wq;
+ struct delayed_work mbx_task;
+ struct workqueue_struct *mbx_wq;
+ struct delayed_work vc_event_task;
+ struct workqueue_struct *vc_event_wq;
+ struct delayed_work stats_task;
+ struct workqueue_struct *stats_wq;
+ struct virtchnl2_get_capabilities caps;
+
+ wait_queue_head_t vchnl_wq;
+ DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
+ char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
+ struct idpf_dev_ops dev_ops;
+ int num_vfs;
+ bool crc_enable;
+ bool req_tx_splitq;
+ bool req_rx_splitq;
+
+ struct mutex vport_ctrl_lock;
+ struct mutex vector_lock;
+ struct mutex queue_lock;
+ struct mutex vc_buf_lock;
+};
+
+/**
+ * idpf_is_queue_model_split - check if queue model is split
+ * @q_model: queue model single or split
+ *
+ * Returns true if queue model is split else false
+ */
+static inline int idpf_is_queue_model_split(u16 q_model)
+{
+ return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+}
+
+#define idpf_is_cap_ena(adapter, field, flag) \
+ idpf_is_capability_ena(adapter, false, field, flag)
+#define idpf_is_cap_ena_all(adapter, field, flag) \
+ idpf_is_capability_ena(adapter, true, field, flag)
+
+bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
+ enum idpf_cap_field field, u64 flag);
+
+#define IDPF_CAP_RSS (\
+ VIRTCHNL2_CAP_RSS_IPV4_TCP |\
+ VIRTCHNL2_CAP_RSS_IPV4_UDP |\
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP |\
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER |\
+ VIRTCHNL2_CAP_RSS_IPV6_TCP |\
+ VIRTCHNL2_CAP_RSS_IPV6_UDP |\
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP |\
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER)
+
+#define IDPF_CAP_RSC (\
+ VIRTCHNL2_CAP_RSC_IPV4_TCP |\
+ VIRTCHNL2_CAP_RSC_IPV6_TCP)
+
+#define IDPF_CAP_HSPLIT (\
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |\
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6)
+
+#define IDPF_CAP_RX_CSUM_L4V4 (\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP)
+
+#define IDPF_CAP_RX_CSUM_L4V6 (\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+
+#define IDPF_CAP_RX_CSUM (\
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP)
+
+#define IDPF_CAP_SCTP_CSUM (\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |\
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP)
+
+#define IDPF_CAP_TUNNEL_TX_CSUM (\
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |\
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL)
+
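
A hedged usage sketch for the capability helpers and masks above; the feature-flag handling is illustrative rather than the driver's exact code:

    netdev_features_t dflt_features = 0;

    /* Require every RSS capability in the mask. */
    if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
            dflt_features |= NETIF_F_RXHASH;

    /* Any one matching RX checksum capability is enough here. */
    if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
            dflt_features |= NETIF_F_RXCSUM;
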
+/**
+ * idpf_get_reserved_vecs - Get reserved vectors
+ * @adapter: private data struct
+ */
+static inline u16 idpf_get_reserved_vecs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.num_allocated_vectors);
+}
+
+/**
+ * idpf_get_default_vports - Get default number of vports
+ * @adapter: private data struct
+ */
+static inline u16 idpf_get_default_vports(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.default_num_vports);
+}
+
+/**
+ * idpf_get_max_vports - Get max number of vports
+ * @adapter: private data struct
+ */
+static inline u16 idpf_get_max_vports(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.max_vports);
+}
+
+/**
+ * idpf_get_max_tx_bufs - Get max scatter-gather buffers supported by the device
+ * @adapter: private data struct
+ */
+static inline unsigned int idpf_get_max_tx_bufs(struct idpf_adapter *adapter)
+{
+ return adapter->caps.max_sg_bufs_per_tx_pkt;
+}
+
+/**
+ * idpf_get_min_tx_pkt_len - Get min packet length supported by the device
+ * @adapter: private data struct
+ */
+static inline u8 idpf_get_min_tx_pkt_len(struct idpf_adapter *adapter)
+{
+ u8 pkt_len = adapter->caps.min_sso_packet_len;
+
+ return pkt_len ? pkt_len : IDPF_TX_MIN_PKT_LEN;
+}
+
+/**
+ * idpf_get_reg_addr - Get BAR0 register address
+ * @adapter: private data struct
+ * @reg_offset: register offset value
+ *
+ * Based on the register offset, return the actual BAR0 register address
+ */
+static inline void __iomem *idpf_get_reg_addr(struct idpf_adapter *adapter,
+ resource_size_t reg_offset)
+{
+ return (void __iomem *)(adapter->hw.hw_addr + reg_offset);
+}
+
+/**
+ * idpf_is_reset_detected - check if we were reset at some point
+ * @adapter: driver specific private structure
+ *
+ * Returns true if we are either in reset currently or were previously reset.
+ */
+static inline bool idpf_is_reset_detected(struct idpf_adapter *adapter)
+{
+ if (!adapter->hw.arq)
+ return true;
+
+ return !(readl(idpf_get_reg_addr(adapter, adapter->hw.arq->reg.len)) &
+ adapter->hw.arq->reg.len_mask);
+}
+
+/**
+ * idpf_is_reset_in_prog - check if reset is in progress
+ * @adapter: driver specific private structure
+ *
+ * Returns true if hard reset is in progress, false otherwise
+ */
+static inline bool idpf_is_reset_in_prog(struct idpf_adapter *adapter)
+{
+ return (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags) ||
+ test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
+ test_bit(IDPF_HR_DRV_LOAD, adapter->flags));
+}
+
+/**
+ * idpf_netdev_to_vport - get a vport handle from a netdev
+ * @netdev: network interface device structure
+ */
+static inline struct idpf_vport *idpf_netdev_to_vport(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return np->vport;
+}
+
+/**
+ * idpf_netdev_to_adapter - Get adapter handle from a netdev
+ * @netdev: Network interface device structure
+ */
+static inline struct idpf_adapter *idpf_netdev_to_adapter(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return np->adapter;
+}
+
+/**
+ * idpf_is_feature_ena - Determine if a particular feature is enabled
+ * @vport: Vport to check
+ * @feature: Netdev flag to check
+ *
+ * Returns true or false if a particular feature is enabled.
+ */
+static inline bool idpf_is_feature_ena(const struct idpf_vport *vport,
+ netdev_features_t feature)
+{
+ return vport->netdev->features & feature;
+}
+
+/**
+ * idpf_get_max_tx_hdr_size - Get the size of the TX header
+ * @adapter: Driver specific private structure
+ */
+static inline u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.max_tx_hdr_size);
+}
+
+/**
+ * idpf_vport_ctrl_lock - Acquire the vport control lock
+ * @netdev: Network interface device structure
+ *
+ * This lock should be used by non-datapath code to protect against vport
+ * destruction.
+ */
+static inline void idpf_vport_ctrl_lock(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ mutex_lock(&np->adapter->vport_ctrl_lock);
+}
+
+/**
+ * idpf_vport_ctrl_unlock - Release the vport control lock
+ * @netdev: Network interface device structure
+ */
+static inline void idpf_vport_ctrl_unlock(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ mutex_unlock(&np->adapter->vport_ctrl_lock);
+}
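
A hedged sketch of how a non-datapath callback is expected to bracket vport access with the helpers above (the callback and the reconfig helper are illustrative names, not driver functions):

    static int idpf_example_ndo(struct net_device *netdev)
    {
            struct idpf_vport *vport;
            int err;

            idpf_vport_ctrl_lock(netdev);
            vport = idpf_netdev_to_vport(netdev);

            err = idpf_example_reconfig(vport);     /* hypothetical helper */

            idpf_vport_ctrl_unlock(netdev);

            return err;
    }
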
+
+void idpf_statistics_task(struct work_struct *work);
+void idpf_init_task(struct work_struct *work);
+void idpf_service_task(struct work_struct *work);
+void idpf_mbx_task(struct work_struct *work);
+void idpf_vc_event_task(struct work_struct *work);
+void idpf_dev_ops_init(struct idpf_adapter *adapter);
+void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+int idpf_intr_req(struct idpf_adapter *adapter);
+void idpf_intr_rel(struct idpf_adapter *adapter);
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_initiate_soft_reset(struct idpf_vport *vport,
+ enum idpf_vport_reset_cause reset_cause);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+void idpf_deinit_task(struct idpf_adapter *adapter);
+int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
+ u16 *q_vector_idxs,
+ struct idpf_vector_info *vec_info);
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
+ void *msg, int msg_size);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg);
+void idpf_set_ethtool_ops(struct net_device *netdev);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
+ u16 itr, bool tx);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
+
+#endif /* !_IDPF_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
new file mode 100644
index 000000000000..c7f43d2fcd13
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf_controlq.h"
+
+/**
+ * idpf_ctlq_setup_regs - initialize control queue registers
+ * @cq: pointer to the specific control queue
+ * @q_create_info: struct containing info for the queue to be initialized
+ */
+static void idpf_ctlq_setup_regs(struct idpf_ctlq_info *cq,
+ struct idpf_ctlq_create_info *q_create_info)
+{
+ /* set control queue registers in our local struct */
+ cq->reg.head = q_create_info->reg.head;
+ cq->reg.tail = q_create_info->reg.tail;
+ cq->reg.len = q_create_info->reg.len;
+ cq->reg.bah = q_create_info->reg.bah;
+ cq->reg.bal = q_create_info->reg.bal;
+ cq->reg.len_mask = q_create_info->reg.len_mask;
+ cq->reg.len_ena_mask = q_create_info->reg.len_ena_mask;
+ cq->reg.head_mask = q_create_info->reg.head_mask;
+}
+
+/**
+ * idpf_ctlq_init_regs - Initialize control queue registers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ * @is_rxq: true if receive control queue, false otherwise
+ *
+ * Initialize registers. The caller is expected to have already initialized the
+ * descriptor ring memory and buffer memory
+ */
+static void idpf_ctlq_init_regs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ bool is_rxq)
+{
+ /* Update tail to post pre-allocated buffers for rx queues */
+ if (is_rxq)
+ wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1));
+
+ /* For non-Mailbox control queues only TAIL needs to be set */
+ if (cq->q_id != -1)
+ return;
+
+ /* Clear Head for both send or receive */
+ wr32(hw, cq->reg.head, 0);
+
+ /* set starting point */
+ wr32(hw, cq->reg.bal, lower_32_bits(cq->desc_ring.pa));
+ wr32(hw, cq->reg.bah, upper_32_bits(cq->desc_ring.pa));
+ wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask));
+}
+
+/**
+ * idpf_ctlq_init_rxq_bufs - populate receive queue descriptors with buf
+ * @cq: pointer to the specific Control queue
+ *
+ * Record the address of the receive queue DMA buffers in the descriptors.
+ * The buffers must have been previously allocated.
+ */
+static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
+{
+ int i;
+
+ for (i = 0; i < cq->ring_size; i++) {
+ struct idpf_ctlq_desc *desc = IDPF_CTLQ_DESC(cq, i);
+ struct idpf_dma_mem *bi = cq->bi.rx_buff[i];
+
+ /* No buffer to post to descriptor, continue */
+ if (!bi)
+ continue;
+
+ desc->flags =
+ cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+ desc->opcode = 0;
+ desc->datalen = cpu_to_le16(bi->size);
+ desc->ret_val = 0;
+ desc->v_opcode_dtype = 0;
+ desc->v_retval = 0;
+ desc->params.indirect.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.indirect.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.indirect.param0 = 0;
+ desc->params.indirect.sw_cookie = 0;
+ desc->params.indirect.v_flags = 0;
+ }
+}
+
+/**
+ * idpf_ctlq_shutdown - shutdown the CQ
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * The main shutdown routine for any control queue
+ */
+static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ mutex_lock(&cq->cq_lock);
+
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_dealloc_ring_res(hw, cq);
+
+ /* Set ring_size to 0 to indicate uninitialized queue */
+ cq->ring_size = 0;
+
+ mutex_unlock(&cq->cq_lock);
+ mutex_destroy(&cq->cq_lock);
+}
+
+/**
+ * idpf_ctlq_add - add one control queue
+ * @hw: pointer to hardware struct
+ * @qinfo: info for queue to be created
+ * @cq_out: (output) double pointer to control queue to be created
+ *
+ * Allocate and initialize a control queue and add it to the control queue list.
+ * The cq parameter will be allocated/initialized and passed back to the caller
+ * if no errors occur.
+ *
+ * Note: idpf_ctlq_init must be called prior to any calls to idpf_ctlq_add
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+ struct idpf_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq_out)
+{
+ struct idpf_ctlq_info *cq;
+ bool is_rxq = false;
+ int err;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+ if (!cq)
+ return -ENOMEM;
+
+ cq->cq_type = qinfo->type;
+ cq->q_id = qinfo->id;
+ cq->buf_size = qinfo->buf_size;
+ cq->ring_size = qinfo->len;
+
+ cq->next_to_use = 0;
+ cq->next_to_clean = 0;
+ cq->next_to_post = cq->ring_size - 1;
+
+ switch (qinfo->type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ is_rxq = true;
+ fallthrough;
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ err = idpf_ctlq_alloc_ring_res(hw, cq);
+ break;
+ default:
+ err = -EBADR;
+ break;
+ }
+
+ if (err)
+ goto init_free_q;
+
+ if (is_rxq) {
+ idpf_ctlq_init_rxq_bufs(cq);
+ } else {
+ /* Allocate the array of msg pointers for TX queues */
+ cq->bi.tx_msg = kcalloc(qinfo->len,
+ sizeof(struct idpf_ctlq_msg *),
+ GFP_KERNEL);
+ if (!cq->bi.tx_msg) {
+ err = -ENOMEM;
+ goto init_dealloc_q_mem;
+ }
+ }
+
+ idpf_ctlq_setup_regs(cq, qinfo);
+
+ idpf_ctlq_init_regs(hw, cq, is_rxq);
+
+ mutex_init(&cq->cq_lock);
+
+ list_add(&cq->cq_list, &hw->cq_list_head);
+
+ *cq_out = cq;
+
+ return 0;
+
+init_dealloc_q_mem:
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_dealloc_ring_res(hw, cq);
+init_free_q:
+ kfree(cq);
+
+ return err;
+}
+
+/**
+ * idpf_ctlq_remove - deallocate and remove specified control queue
+ * @hw: pointer to hardware struct
+ * @cq: pointer to control queue to be removed
+ */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ list_del(&cq->cq_list);
+ idpf_ctlq_shutdown(hw, cq);
+ kfree(cq);
+}
+
+/**
+ * idpf_ctlq_init - main initialization routine for all control queues
+ * @hw: pointer to hardware struct
+ * @num_q: number of queues to initialize
+ * @q_info: array of structs containing info for each queue to be initialized
+ *
+ * This initializes any number and any type of control queues. This is an all
+ * or nothing routine; if one fails, all previously allocated queues will be
+ * destroyed. This must be called prior to using the individual add/remove
+ * APIs.
+ */
+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
+ struct idpf_ctlq_create_info *q_info)
+{
+ struct idpf_ctlq_info *cq, *tmp;
+ int err;
+ int i;
+
+ INIT_LIST_HEAD(&hw->cq_list_head);
+
+ for (i = 0; i < num_q; i++) {
+ struct idpf_ctlq_create_info *qinfo = q_info + i;
+
+ err = idpf_ctlq_add(hw, qinfo, &cq);
+ if (err)
+ goto init_destroy_qs;
+ }
+
+ return 0;
+
+init_destroy_qs:
+ list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
+ idpf_ctlq_remove(hw, cq);
+
+ return err;
+}
+
+/**
+ * idpf_ctlq_deinit - destroy all control queues
+ * @hw: pointer to hw struct
+ */
+void idpf_ctlq_deinit(struct idpf_hw *hw)
+{
+ struct idpf_ctlq_info *cq, *tmp;
+
+ list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
+ idpf_ctlq_remove(hw, cq);
+}
+
+/**
+ * idpf_ctlq_send - send command to Control Queue (CQ)
+ * @hw: pointer to hw struct
+ * @cq: handle to control queue struct to send on
+ * @num_q_msg: number of messages to send on control queue
+ * @q_msg: pointer to array of queue messages to be sent
+ *
+ * The caller is expected to allocate DMAable buffers and pass them to the
+ * send routine via the q_msg struct / control queue specific data struct.
+ * The control queue will hold a reference to each send message until
+ * the completion for that message has been cleaned.
+ */
+int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ u16 num_q_msg, struct idpf_ctlq_msg q_msg[])
+{
+ struct idpf_ctlq_desc *desc;
+ int num_desc_avail;
+ int err = 0;
+ int i;
+
+ mutex_lock(&cq->cq_lock);
+
+ /* Ensure there are enough descriptors to send all messages */
+ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
+ if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ for (i = 0; i < num_q_msg; i++) {
+ struct idpf_ctlq_msg *msg = &q_msg[i];
+
+ desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
+
+ desc->opcode = cpu_to_le16(msg->opcode);
+ desc->pfid_vfid = cpu_to_le16(msg->func_id);
+
+ desc->v_opcode_dtype = cpu_to_le32(msg->cookie.mbx.chnl_opcode);
+ desc->v_retval = cpu_to_le32(msg->cookie.mbx.chnl_retval);
+
+ desc->flags = cpu_to_le16((msg->host_id & IDPF_HOST_ID_MASK) <<
+ IDPF_CTLQ_FLAG_HOST_ID_S);
+ if (msg->data_len) {
+ struct idpf_dma_mem *buff = msg->ctx.indirect.payload;
+
+ desc->datalen |= cpu_to_le16(msg->data_len);
+ desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_BUF);
+ desc->flags |= cpu_to_le16(IDPF_CTLQ_FLAG_RD);
+
+ /* Update the address values in the desc with the pa
+ * value for respective buffer
+ */
+ desc->params.indirect.addr_high =
+ cpu_to_le32(upper_32_bits(buff->pa));
+ desc->params.indirect.addr_low =
+ cpu_to_le32(lower_32_bits(buff->pa));
+
+ memcpy(&desc->params, msg->ctx.indirect.context,
+ IDPF_INDIRECT_CTX_SIZE);
+ } else {
+ memcpy(&desc->params, msg->ctx.direct,
+ IDPF_DIRECT_CTX_SIZE);
+ }
+
+ /* Store buffer info */
+ cq->bi.tx_msg[cq->next_to_use] = msg;
+
+ (cq->next_to_use)++;
+ if (cq->next_to_use == cq->ring_size)
+ cq->next_to_use = 0;
+ }
+
+ /* Force memory write to complete before letting hardware
+ * know that there are new descriptors to fetch.
+ */
+ dma_wmb();
+
+ wr32(hw, cq->reg.tail, cq->next_to_use);
+
+err_unlock:
+ mutex_unlock(&cq->cq_lock);
+
+ return err;
+}
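+
+/* A usage sketch, illustrative only and not part of this patch: a caller
+ * fills an idpf_ctlq_msg, attaches a caller-allocated DMA buffer for
+ * indirect commands, sends it, and later reclaims the descriptor with
+ * idpf_ctlq_clean_sq(); payload_len and dma_buf below are hypothetical.
+ *
+ * struct idpf_ctlq_msg msg = { .opcode = idpf_mbq_opc_send_msg_to_cp };
+ * struct idpf_ctlq_msg *done[1];
+ * u16 count = 1;
+ *
+ * msg.data_len = payload_len;
+ * msg.ctx.indirect.payload = dma_buf;	// struct idpf_dma_mem *
+ * idpf_ctlq_send(hw, cq, 1, &msg);
+ * ...
+ * idpf_ctlq_clean_sq(cq, &count, done);	// done[0] == &msg once cleaned
+ */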
+
+/**
+ * idpf_ctlq_clean_sq - reclaim send descriptors on HW write back for the
+ * requested queue
+ * @cq: pointer to the specific Control queue
+ * @clean_count: (input|output) number of descriptors to clean as input, and
+ * number of descriptors actually cleaned as output
+ * @msg_status: (output) pointer to msg pointer array to be populated; needs
+ * to be allocated by caller
+ *
+ * Returns an array of message pointers associated with the cleaned
+ * descriptors. The pointers are to the original ctlq_msgs sent on the cleaned
+ * descriptors. The status will be returned for each; any messages that failed
+ * to send will have a non-zero status. The caller is expected to free original
+ * ctlq_msgs and free or reuse the DMA buffers.
+ */
+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ struct idpf_ctlq_msg *msg_status[])
+{
+ struct idpf_ctlq_desc *desc;
+ u16 i, num_to_clean;
+ u16 ntc, desc_err;
+
+ if (*clean_count == 0)
+ return 0;
+ if (*clean_count > cq->ring_size)
+ return -EBADR;
+
+ mutex_lock(&cq->cq_lock);
+
+ ntc = cq->next_to_clean;
+
+ num_to_clean = *clean_count;
+
+ for (i = 0; i < num_to_clean; i++) {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC(cq, ntc);
+ if (!(le16_to_cpu(desc->flags) & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ /* strip off FW internal code */
+ desc_err = le16_to_cpu(desc->ret_val) & 0xff;
+
+ msg_status[i] = cq->bi.tx_msg[ntc];
+ msg_status[i]->status = desc_err;
+
+ cq->bi.tx_msg[ntc] = NULL;
+
+ /* Zero out any stale data */
+ memset(desc, 0, sizeof(*desc));
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+
+ mutex_unlock(&cq->cq_lock);
+
+ /* Return number of descriptors actually cleaned */
+ *clean_count = i;
+
+ return 0;
+}
+
+/**
+ * idpf_ctlq_post_rx_buffs - post buffers to descriptor ring
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue handle
+ * @buff_count: (input|output) input is number of buffers caller is trying to
+ * return; output is number of buffers that were not posted
+ * @buffs: array of pointers to dma mem structs to be given to hardware
+ *
+ * Caller uses this function to return DMA buffers to the descriptor ring after
+ * consuming them; buff_count will be the number of buffers.
+ *
+ * Note: this function needs to be called after a receive call even
+ * if there are no DMA buffers to be returned, i.e. buff_count = 0,
+ * buffs = NULL to support direct commands
+ */
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
+ u16 *buff_count, struct idpf_dma_mem **buffs)
+{
+ struct idpf_ctlq_desc *desc;
+ u16 ntp = cq->next_to_post;
+ bool buffs_avail = false;
+ u16 tbp = ntp + 1;
+ int i = 0;
+
+ if (*buff_count > cq->ring_size)
+ return -EBADR;
+
+ if (*buff_count > 0)
+ buffs_avail = true;
+
+ mutex_lock(&cq->cq_lock);
+
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ if (tbp == cq->next_to_clean)
+ /* Nothing to do */
+ goto post_buffs_out;
+
+ /* Post buffers for as many as provided or up until the last one used */
+ while (ntp != cq->next_to_clean) {
+ desc = IDPF_CTLQ_DESC(cq, ntp);
+
+ if (cq->bi.rx_buff[ntp])
+ goto fill_desc;
+ if (!buffs_avail) {
+ /* If the caller hasn't given us any buffers or
+ * there are none left, search the ring itself
+ * for an available buffer to move to this
+ * entry starting at the next entry in the ring
+ */
+ tbp = ntp + 1;
+
+ /* Wrap ring if necessary */
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+
+ while (tbp != cq->next_to_clean) {
+ if (cq->bi.rx_buff[tbp]) {
+ cq->bi.rx_buff[ntp] =
+ cq->bi.rx_buff[tbp];
+ cq->bi.rx_buff[tbp] = NULL;
+
+ /* Found a buffer, no need to
+ * search anymore
+ */
+ break;
+ }
+
+ /* Wrap ring if necessary */
+ tbp++;
+ if (tbp >= cq->ring_size)
+ tbp = 0;
+ }
+
+ if (tbp == cq->next_to_clean)
+ goto post_buffs_out;
+ } else {
+ /* Give back pointer to DMA buffer */
+ cq->bi.rx_buff[ntp] = buffs[i];
+ i++;
+
+ if (i >= *buff_count)
+ buffs_avail = false;
+ }
+
+fill_desc:
+ desc->flags =
+ cpu_to_le16(IDPF_CTLQ_FLAG_BUF | IDPF_CTLQ_FLAG_RD);
+
+ /* Post buffers to descriptor */
+ desc->datalen = cpu_to_le16(cq->bi.rx_buff[ntp]->size);
+ desc->params.indirect.addr_high =
+ cpu_to_le32(upper_32_bits(cq->bi.rx_buff[ntp]->pa));
+ desc->params.indirect.addr_low =
+ cpu_to_le32(lower_32_bits(cq->bi.rx_buff[ntp]->pa));
+
+ ntp++;
+ if (ntp == cq->ring_size)
+ ntp = 0;
+ }
+
+post_buffs_out:
+ /* Only update tail if buffers were actually posted */
+ if (cq->next_to_post != ntp) {
+ if (ntp)
+ /* Update next_to_post to ntp - 1 since current ntp
+ * will not have a buffer
+ */
+ cq->next_to_post = ntp - 1;
+ else
+ /* Wrap to end of ring since current ntp is 0 */
+ cq->next_to_post = cq->ring_size - 1;
+
+ wr32(hw, cq->reg.tail, cq->next_to_post);
+ }
+
+ mutex_unlock(&cq->cq_lock);
+
+ /* return the number of buffers that were not posted */
+ *buff_count = *buff_count - i;
+
+ return 0;
+}
+
+/**
+ * idpf_ctlq_recv - receive control queue message call back
+ * @cq: pointer to control queue handle to receive on
+ * @num_q_msg: (input|output) input number of messages that should be received;
+ * output number of messages actually received
+ * @q_msg: (output) array of received control queue messages on this q;
+ * needs to be pre-allocated by caller for as many messages as requested
+ *
+ * Called by interrupt handler or polling mechanism. Caller is expected
+ * to free buffers
+ */
+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ struct idpf_ctlq_msg *q_msg)
+{
+ u16 num_to_clean, ntc, flags;
+ struct idpf_ctlq_desc *desc;
+ int err = 0;
+ u16 i;
+
+ if (*num_q_msg == 0)
+ return 0;
+ else if (*num_q_msg > cq->ring_size)
+ return -EBADR;
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&cq->cq_lock);
+
+ ntc = cq->next_to_clean;
+
+ num_to_clean = *num_q_msg;
+
+ for (i = 0; i < num_to_clean; i++) {
+ /* Fetch next descriptor and check if marked as done */
+ desc = IDPF_CTLQ_DESC(cq, ntc);
+ flags = le16_to_cpu(desc->flags);
+
+ if (!(flags & IDPF_CTLQ_FLAG_DD))
+ break;
+
+ q_msg[i].vmvf_type = (flags &
+ (IDPF_CTLQ_FLAG_FTYPE_VM |
+ IDPF_CTLQ_FLAG_FTYPE_PF)) >>
+ IDPF_CTLQ_FLAG_FTYPE_S;
+
+ if (flags & IDPF_CTLQ_FLAG_ERR)
+ err = -EBADMSG;
+
+ q_msg[i].cookie.mbx.chnl_opcode =
+ le32_to_cpu(desc->v_opcode_dtype);
+ q_msg[i].cookie.mbx.chnl_retval =
+ le32_to_cpu(desc->v_retval);
+
+ q_msg[i].opcode = le16_to_cpu(desc->opcode);
+ q_msg[i].data_len = le16_to_cpu(desc->datalen);
+ q_msg[i].status = le16_to_cpu(desc->ret_val);
+
+ if (desc->datalen) {
+ memcpy(q_msg[i].ctx.indirect.context,
+ &desc->params.indirect, IDPF_INDIRECT_CTX_SIZE);
+
+ /* Assign pointer to dma buffer to ctlq_msg array
+ * to be given to upper layer
+ */
+ q_msg[i].ctx.indirect.payload = cq->bi.rx_buff[ntc];
+
+ /* Zero out pointer to DMA buffer info;
+ * will be repopulated by post buffers API
+ */
+ cq->bi.rx_buff[ntc] = NULL;
+ } else {
+ memcpy(q_msg[i].ctx.direct, desc->params.raw,
+ IDPF_DIRECT_CTX_SIZE);
+ }
+
+ /* Zero out stale data in descriptor */
+ memset(desc, 0, sizeof(struct idpf_ctlq_desc));
+
+ ntc++;
+ if (ntc == cq->ring_size)
+ ntc = 0;
+ }
+
+ cq->next_to_clean = ntc;
+
+ mutex_unlock(&cq->cq_lock);
+
+ *num_q_msg = i;
+ if (*num_q_msg == 0)
+ err = -ENOMSG;
+
+ return err;
+}
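+
+/* Receive-side sketch, illustrative only and not part of this patch; the
+ * batch size of 16 and the arq/bufs names are hypothetical:
+ *
+ * u16 num = 16, nb = 0;
+ * struct idpf_ctlq_msg msgs[16];
+ * struct idpf_dma_mem *bufs[16];
+ *
+ * idpf_ctlq_recv(arq, &num, msgs);
+ * // process msgs[0..num - 1]; for indirect messages the consumed buffer is
+ * // msgs[i].ctx.indirect.payload and can be collected into bufs[] (nb of
+ * // them) before being handed back, which is required even when nb == 0:
+ * idpf_ctlq_post_rx_buffs(hw, arq, &nb, bufs);
+ */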
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.h b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
new file mode 100644
index 000000000000..c1aba09e9856
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_CONTROLQ_H_
+#define _IDPF_CONTROLQ_H_
+
+#include <linux/slab.h>
+
+#include "idpf_controlq_api.h"
+
+/* Maximum buffer length for all control queue types */
+#define IDPF_CTLQ_MAX_BUF_LEN 4096
+
+#define IDPF_CTLQ_DESC(R, i) \
+ (&(((struct idpf_ctlq_desc *)((R)->desc_ring.va))[i]))
+
+#define IDPF_CTLQ_DESC_UNUSED(R) \
+ ((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->ring_size) + \
+ (R)->next_to_clean - (R)->next_to_use - 1))
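+
+/* Illustrative example (values are arbitrary): with ring_size 64,
+ * next_to_clean 5 and next_to_use 12, IDPF_CTLQ_DESC_UNUSED() yields
+ * 64 + 5 - 12 - 1 = 56 free descriptors; the "- 1" keeps one slot
+ * permanently unused so next_to_use never catches up to next_to_clean
+ * on a full ring.
+ */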
+
+/* Control Queue default settings */
+#define IDPF_CTRL_SQ_CMD_TIMEOUT 250 /* msecs */
+
+struct idpf_ctlq_desc {
+ /* Control queue descriptor flags */
+ __le16 flags;
+ /* Control queue message opcode */
+ __le16 opcode;
+ __le16 datalen; /* 0 for direct commands */
+ union {
+ __le16 ret_val;
+ __le16 pfid_vfid;
+#define IDPF_CTLQ_DESC_VF_ID_S 0
+#define IDPF_CTLQ_DESC_VF_ID_M (0x7FF << IDPF_CTLQ_DESC_VF_ID_S)
+#define IDPF_CTLQ_DESC_PF_ID_S 11
+#define IDPF_CTLQ_DESC_PF_ID_M (0x1F << IDPF_CTLQ_DESC_PF_ID_S)
+ };
+
+ /* Virtchnl message opcode and virtchnl descriptor type
+ * v_opcode=[27:0], v_dtype=[31:28]
+ */
+ __le32 v_opcode_dtype;
+ /* Virtchnl return value */
+ __le32 v_retval;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } direct;
+ struct {
+ __le32 param0;
+ __le16 sw_cookie;
+ /* Virtchnl flags */
+ __le16 v_flags;
+ __le32 addr_high;
+ __le32 addr_low;
+ } indirect;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR| * RSV * |FTYPE | *RSV* |RD |VFC|BUF| HOST_ID |
+ */
+/* command flags and offsets */
+#define IDPF_CTLQ_FLAG_DD_S 0
+#define IDPF_CTLQ_FLAG_CMP_S 1
+#define IDPF_CTLQ_FLAG_ERR_S 2
+#define IDPF_CTLQ_FLAG_FTYPE_S 6
+#define IDPF_CTLQ_FLAG_RD_S 10
+#define IDPF_CTLQ_FLAG_VFC_S 11
+#define IDPF_CTLQ_FLAG_BUF_S 12
+#define IDPF_CTLQ_FLAG_HOST_ID_S 13
+
+#define IDPF_CTLQ_FLAG_DD BIT(IDPF_CTLQ_FLAG_DD_S) /* 0x1 */
+#define IDPF_CTLQ_FLAG_CMP BIT(IDPF_CTLQ_FLAG_CMP_S) /* 0x2 */
+#define IDPF_CTLQ_FLAG_ERR BIT(IDPF_CTLQ_FLAG_ERR_S) /* 0x4 */
+#define IDPF_CTLQ_FLAG_FTYPE_VM BIT(IDPF_CTLQ_FLAG_FTYPE_S) /* 0x40 */
+#define IDPF_CTLQ_FLAG_FTYPE_PF BIT(IDPF_CTLQ_FLAG_FTYPE_S + 1) /* 0x80 */
+#define IDPF_CTLQ_FLAG_RD BIT(IDPF_CTLQ_FLAG_RD_S) /* 0x400 */
+#define IDPF_CTLQ_FLAG_VFC BIT(IDPF_CTLQ_FLAG_VFC_S) /* 0x800 */
+#define IDPF_CTLQ_FLAG_BUF BIT(IDPF_CTLQ_FLAG_BUF_S) /* 0x1000 */
+
+/* Host ID is a special field that has 3b and not a 1b flag */
+#define IDPF_CTLQ_FLAG_HOST_ID_M MAKE_MASK(0x7000UL, IDPF_CTLQ_FLAG_HOST_ID_S)
+
+struct idpf_mbxq_desc {
+ u8 pad[8]; /* CTLQ flags/opcode/len/retval fields */
+ u32 chnl_opcode; /* avoid confusion with desc->opcode */
+ u32 chnl_retval; /* ditto for desc->retval */
+ u32 pf_vf_id; /* used by CP when sending to PF */
+};
+
+/* Define the driver hardware struct to replace other control structs as needed
+ * Align to ctlq_hw_info
+ */
+struct idpf_hw {
+ void __iomem *hw_addr;
+ resource_size_t hw_addr_len;
+
+ struct idpf_adapter *back;
+
+ /* control queue - send and receive */
+ struct idpf_ctlq_info *asq;
+ struct idpf_ctlq_info *arq;
+
+ /* pci info */
+ u16 device_id;
+ u16 vendor_id;
+ u16 subsystem_device_id;
+ u16 subsystem_vendor_id;
+ u8 revision_id;
+ bool adapter_stopped;
+
+ struct list_head cq_list_head;
+};
+
+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq);
+
+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq);
+
+/* prototype for functions used for dynamic memory allocation */
+void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem,
+ u64 size);
+void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem);
+#endif /* _IDPF_CONTROLQ_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
new file mode 100644
index 000000000000..8dee098bbfb0
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_CONTROLQ_API_H_
+#define _IDPF_CONTROLQ_API_H_
+
+#include "idpf_mem.h"
+
+struct idpf_hw;
+
+/* Used for queue init, response and events */
+enum idpf_ctlq_type {
+ IDPF_CTLQ_TYPE_MAILBOX_TX = 0,
+ IDPF_CTLQ_TYPE_MAILBOX_RX = 1,
+ IDPF_CTLQ_TYPE_CONFIG_TX = 2,
+ IDPF_CTLQ_TYPE_CONFIG_RX = 3,
+ IDPF_CTLQ_TYPE_EVENT_RX = 4,
+ IDPF_CTLQ_TYPE_RDMA_TX = 5,
+ IDPF_CTLQ_TYPE_RDMA_RX = 6,
+ IDPF_CTLQ_TYPE_RDMA_COMPL = 7
+};
+
+/* Generic Control Queue Structures */
+struct idpf_ctlq_reg {
+ /* used for queue tracking */
+ u32 head;
+ u32 tail;
+ /* Below applies only to default mb (if present) */
+ u32 len;
+ u32 bah;
+ u32 bal;
+ u32 len_mask;
+ u32 len_ena_mask;
+ u32 head_mask;
+};
+
+/* Generic queue msg structure */
+struct idpf_ctlq_msg {
+ u8 vmvf_type; /* represents the source of the message on recv */
+#define IDPF_VMVF_TYPE_VF 0
+#define IDPF_VMVF_TYPE_VM 1
+#define IDPF_VMVF_TYPE_PF 2
+ u8 host_id;
+ /* 3b field used only when sending a message to CP - to be used in
+ * combination with target func_id to route the message
+ */
+#define IDPF_HOST_ID_MASK 0x7
+
+ u16 opcode;
+ u16 data_len; /* data_len = 0 when no payload is attached */
+ union {
+ u16 func_id; /* when sending a message */
+ u16 status; /* when receiving a message */
+ };
+ union {
+ struct {
+ u32 chnl_opcode;
+ u32 chnl_retval;
+ } mbx;
+ } cookie;
+ union {
+#define IDPF_DIRECT_CTX_SIZE 16
+#define IDPF_INDIRECT_CTX_SIZE 8
+ /* 16 bytes of context can be provided or 8 bytes of context
+ * plus the address of a DMA buffer
+ */
+ u8 direct[IDPF_DIRECT_CTX_SIZE];
+ struct {
+ u8 context[IDPF_INDIRECT_CTX_SIZE];
+ struct idpf_dma_mem *payload;
+ } indirect;
+ } ctx;
+};
+
+/* Generic queue info structures */
+/* MB, CONFIG and EVENT q do not have extended info */
+struct idpf_ctlq_create_info {
+ enum idpf_ctlq_type type;
+ int id; /* absolute queue offset passed as input
+ * -1 for default mailbox if present
+ */
+ u16 len; /* Queue length passed as input */
+ u16 buf_size; /* buffer size passed as input */
+ u64 base_address; /* output, HPA of the Queue start */
+ struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+
+ int ext_info_size;
+ void *ext_info; /* Specific to q type */
+};
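+
+/* Illustrative only, not part of this patch: the default mailbox pair could
+ * be described with something like (the queue length of 64 is hypothetical)
+ *
+ * struct idpf_ctlq_create_info qinfo[IDPF_NUM_DFLT_MBX_Q] = {
+ *	{ .type = IDPF_CTLQ_TYPE_MAILBOX_TX, .id = -1,
+ *	  .len = 64, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
+ *	{ .type = IDPF_CTLQ_TYPE_MAILBOX_RX, .id = -1,
+ *	  .len = 64, .buf_size = IDPF_CTLQ_MAX_BUF_LEN },
+ * };
+ *
+ * and handed to idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, qinfo); the .reg
+ * fields are expected to be filled in beforehand by the device-specific
+ * register setup (see idpf_ctlq_reg_init() in idpf_dev.c).
+ */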
+
+/* Control Queue information */
+struct idpf_ctlq_info {
+ struct list_head cq_list;
+
+ enum idpf_ctlq_type cq_type;
+ int q_id;
+ struct mutex cq_lock; /* control queue lock */
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_post; /* starting descriptor to post buffers
+ * to after recv
+ */
+
+ struct idpf_dma_mem desc_ring; /* descriptor ring memory
+ * idpf_dma_mem is defined in OSdep.h
+ */
+ union {
+ struct idpf_dma_mem **rx_buff;
+ struct idpf_ctlq_msg **tx_msg;
+ } bi;
+
+ u16 buf_size; /* queue buffer size */
+ u16 ring_size; /* Number of descriptors */
+ struct idpf_ctlq_reg reg; /* registers accessed by ctlqs */
+};
+
+/**
+ * enum idpf_mbx_opc - PF/VF mailbox commands
+ * @idpf_mbq_opc_send_msg_to_cp: used by PF or VF to send a message to its CP
+ */
+enum idpf_mbx_opc {
+ idpf_mbq_opc_send_msg_to_cp = 0x0801,
+};
+
+/* API supported for control queue management */
+/* Will init all required q including default mb. "q_info" is an array of
+ * create_info structs equal to the number of control queues to be created.
+ */
+int idpf_ctlq_init(struct idpf_hw *hw, u8 num_q,
+ struct idpf_ctlq_create_info *q_info);
+
+/* Allocate and initialize a single control queue, which will be added to the
+ * control queue list; returns a handle to the created control queue
+ */
+int idpf_ctlq_add(struct idpf_hw *hw,
+ struct idpf_ctlq_create_info *qinfo,
+ struct idpf_ctlq_info **cq);
+
+/* Deinitialize and deallocate a single control queue */
+void idpf_ctlq_remove(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq);
+
+/* Sends messages to HW; the ctlq holds a reference to each message until
+ * its completion has been cleaned
+ */
+int idpf_ctlq_send(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq,
+ u16 num_q_msg,
+ struct idpf_ctlq_msg q_msg[]);
+
+/* Receives messages; called by interrupt handler or polling
+ * initiated by app/process. The caller is expected to free the buffers
+ */
+int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
+ struct idpf_ctlq_msg *q_msg);
+
+/* Reclaims send descriptors on HW write back */
+int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
+ struct idpf_ctlq_msg *msg_status[]);
+
+/* Indicate RX buffers are done being processed */
+int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq,
+ u16 *buff_count,
+ struct idpf_dma_mem **buffs);
+
+/* Will destroy all q including the default mb */
+void idpf_ctlq_deinit(struct idpf_hw *hw);
+
+#endif /* _IDPF_CONTROLQ_API_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
new file mode 100644
index 000000000000..a942a6385d06
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_setup.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf_controlq.h"
+
+/**
+ * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ */
+static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);
+
+ cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
+ if (!cq->desc_ring.va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Allocate the buffer head for all control queues, and if it's a receive
+ * queue, allocate DMA buffers
+ */
+static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ int i;
+
+ /* Do not allocate DMA buffers for transmit queues */
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
+ return 0;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+ cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
+ GFP_KERNEL);
+ if (!cq->bi.rx_buff)
+ return -ENOMEM;
+
+ /* allocate the mapped buffers (except for the last one) */
+ for (i = 0; i < cq->ring_size - 1; i++) {
+ struct idpf_dma_mem *bi;
+ int num = 1; /* number of idpf_dma_mem to be allocated */
+
+ cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),
+ GFP_KERNEL);
+ if (!cq->bi.rx_buff[i])
+ goto unwind_alloc_cq_bufs;
+
+ bi = cq->bi.rx_buff[i];
+
+ bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
+ if (!bi->va) {
+ /* unwind will not free the failed entry */
+ kfree(cq->bi.rx_buff[i]);
+ goto unwind_alloc_cq_bufs;
+ }
+ }
+
+ return 0;
+
+unwind_alloc_cq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--) {
+ idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
+ kfree(cq->bi.rx_buff[i]);
+ }
+ kfree(cq->bi.rx_buff);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ */
+static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
+ struct idpf_ctlq_info *cq)
+{
+ idpf_free_dma_mem(hw, &cq->desc_ring);
+}
+
+/**
+ * idpf_ctlq_free_bufs - Free CQ buffer info elements
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the DMA buffers for RX queues, and DMA buffer header for both RX and TX
+ * queues. The upper layers are expected to manage freeing of TX DMA buffers
+ */
+static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ void *bi;
+
+ if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
+ int i;
+
+ /* free DMA buffers for rx queues */
+ for (i = 0; i < cq->ring_size; i++) {
+ if (cq->bi.rx_buff[i]) {
+ idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
+ kfree(cq->bi.rx_buff[i]);
+ }
+ }
+
+ bi = (void *)cq->bi.rx_buff;
+ } else {
+ bi = (void *)cq->bi.tx_msg;
+ }
+
+ /* free the buffer header */
+ kfree(bi);
+}
+
+/**
+ * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
+ * @hw: pointer to hw struct
+ * @cq: pointer to the specific Control queue
+ *
+ * Free the memory used by the ring, buffers and other related structures
+ */
+void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ /* free ring buffers and the ring itself */
+ idpf_ctlq_free_bufs(hw, cq);
+ idpf_ctlq_free_desc_ring(hw, cq);
+}
+
+/**
+ * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
+ * @hw: pointer to hw struct
+ * @cq: pointer to control queue struct
+ *
+ * Do *NOT* hold cq_lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ */
+int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
+{
+ int err;
+
+ /* allocate the ring memory */
+ err = idpf_ctlq_alloc_desc_ring(hw, cq);
+ if (err)
+ return err;
+
+ /* allocate buffers in the rings */
+ err = idpf_ctlq_alloc_bufs(hw, cq);
+ if (err)
+ goto idpf_init_cq_free_ring;
+
+ /* success! */
+ return 0;
+
+idpf_init_cq_free_ring:
+ idpf_free_dma_mem(hw, &cq->desc_ring);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
new file mode 100644
index 000000000000..34ad1ac46b78
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_lan_pf_regs.h"
+
+#define IDPF_PF_ITR_IDX_SPACING 0x4
+
+/**
+ * idpf_ctlq_reg_init - initialize default mailbox registers
+ * @cq: pointer to the array of control queue create info structs
+ */
+static void idpf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+{
+ int i;
+
+ for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
+ struct idpf_ctlq_create_info *ccq = cq + i;
+
+ switch (ccq->type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = PF_FW_ATQH;
+ ccq->reg.tail = PF_FW_ATQT;
+ ccq->reg.len = PF_FW_ATQLEN;
+ ccq->reg.bah = PF_FW_ATQBAH;
+ ccq->reg.bal = PF_FW_ATQBAL;
+ ccq->reg.len_mask = PF_FW_ATQLEN_ATQLEN_M;
+ ccq->reg.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M;
+ ccq->reg.head_mask = PF_FW_ATQH_ATQH_M;
+ break;
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = PF_FW_ARQH;
+ ccq->reg.tail = PF_FW_ARQT;
+ ccq->reg.len = PF_FW_ARQLEN;
+ ccq->reg.bah = PF_FW_ARQBAH;
+ ccq->reg.bal = PF_FW_ARQBAL;
+ ccq->reg.len_mask = PF_FW_ARQLEN_ARQLEN_M;
+ ccq->reg.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M;
+ ccq->reg.head_mask = PF_FW_ARQH_ARQH_M;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * idpf_mb_intr_reg_init - Initialize mailbox interrupt register
+ * @adapter: adapter structure
+ */
+static void idpf_mb_intr_reg_init(struct idpf_adapter *adapter)
+{
+ struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
+ u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl);
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl);
+ intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_itridx_m = PF_GLINT_DYN_CTL_ITR_INDX_M;
+ intr->icr_ena = idpf_get_reg_addr(adapter, PF_INT_DIR_OICR_ENA);
+ intr->icr_ena_ctlq_m = PF_INT_DIR_OICR_ENA_M;
+}
+
+/**
+ * idpf_intr_reg_init - Initialize interrupt registers
+ * @vport: virtual port structure
+ */
+static int idpf_intr_reg_init(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int num_vecs = vport->num_q_vectors;
+ struct idpf_vec_regs *reg_vals;
+ int num_regs, i, err = 0;
+ u32 rx_itr, tx_itr;
+ u16 total_vecs;
+
+ total_vecs = idpf_get_reserved_vecs(vport->adapter);
+ reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs),
+ GFP_KERNEL);
+ if (!reg_vals)
+ return -ENOMEM;
+
+ num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ if (num_regs < num_vecs) {
+ err = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[i];
+ u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_intr_reg *intr = &q_vector->intr_reg;
+ u32 spacing;
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter,
+ reg_vals[vec_id].dyn_ctl_reg);
+ intr->dyn_ctl_intena_m = PF_GLINT_DYN_CTL_INTENA_M;
+ intr->dyn_ctl_itridx_s = PF_GLINT_DYN_CTL_ITR_INDX_S;
+ intr->dyn_ctl_intrvl_s = PF_GLINT_DYN_CTL_INTERVAL_S;
+
+ spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
+ IDPF_PF_ITR_IDX_SPACING);
+ rx_itr = PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_0,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ tx_itr = PF_GLINT_ITR_ADDR(VIRTCHNL2_ITR_IDX_1,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr);
+ intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
+ }
+
+free_reg_vals:
+ kfree(reg_vals);
+
+ return err;
+}
+
+/**
+ * idpf_reset_reg_init - Initialize reset registers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_reset_reg_init(struct idpf_adapter *adapter)
+{
+ adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, PFGEN_RSTAT);
+ adapter->reset_reg.rstat_m = PFGEN_RSTAT_PFR_STATE_M;
+}
+
+/**
+ * idpf_trigger_reset - trigger reset
+ * @adapter: Driver specific private structure
+ * @trig_cause: Reason to trigger a reset
+ */
+static void idpf_trigger_reset(struct idpf_adapter *adapter,
+ enum idpf_flags __always_unused trig_cause)
+{
+ u32 reset_reg;
+
+ reset_reg = readl(idpf_get_reg_addr(adapter, PFGEN_CTRL));
+ writel(reset_reg | PFGEN_CTRL_PFSWR,
+ idpf_get_reg_addr(adapter, PFGEN_CTRL));
+}
+
+/**
+ * idpf_reg_ops_init - Initialize register API function pointers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_reg_ops_init(struct idpf_adapter *adapter)
+{
+ adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_ctlq_reg_init;
+ adapter->dev_ops.reg_ops.intr_reg_init = idpf_intr_reg_init;
+ adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_mb_intr_reg_init;
+ adapter->dev_ops.reg_ops.reset_reg_init = idpf_reset_reg_init;
+ adapter->dev_ops.reg_ops.trigger_reset = idpf_trigger_reset;
+}
+
+/**
+ * idpf_dev_ops_init - Initialize device API function pointers
+ * @adapter: Driver specific private structure
+ */
+void idpf_dev_ops_init(struct idpf_adapter *adapter)
+{
+ idpf_reg_ops_init(adapter);
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_devids.h b/drivers/net/ethernet/intel/idpf/idpf_devids.h
new file mode 100644
index 000000000000..5154a52ae61c
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_devids.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_DEVIDS_H_
+#define _IDPF_DEVIDS_H_
+
+#define IDPF_DEV_ID_PF 0x1452
+#define IDPF_DEV_ID_VF 0x145C
+
+#endif /* _IDPF_DEVIDS_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
new file mode 100644
index 000000000000..52ea38669f85
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ * @rule_locs: pointer to store rule locations
+ *
+ * Returns Success if the command is supported.
+ */
+static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = vport->num_rxq;
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+ default:
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * idpf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the key size on success, error value on failure.
+ */
+static u32 idpf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+
+ if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ return -EOPNOTSUPP;
+
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
+ return user_config->rss_data.rss_key_size;
+}
+
+/**
+ * idpf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size on success, error value on failure.
+ */
+static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *user_config;
+
+ if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ return -EOPNOTSUPP;
+
+ user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+
+ return user_config->rss_data.rss_lut_size;
+}
+
+/**
+ * idpf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function in use
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ */
+static int idpf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_rss_data *rss_data;
+ struct idpf_adapter *adapter;
+ int err = 0;
+ u16 i;
+
+ idpf_vport_ctrl_lock(netdev);
+
+ adapter = np->adapter;
+
+ if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (key)
+ memcpy(key, rss_data->rss_key, rss_data->rss_key_size);
+
+ if (indir) {
+ for (i = 0; i < rss_data->rss_lut_size; i++)
+ indir[i] = rss_data->rss_lut[i];
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key
+ * @hfunc: hash function to use
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ */
+static int idpf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_rss_data *rss_data;
+ struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ int err = 0;
+ u16 lut;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ adapter = vport->adapter;
+
+ if (!idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) {
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ if (key)
+ memcpy(rss_data->rss_key, key, rss_data->rss_key_size);
+
+ if (indir) {
+ for (lut = 0; lut < rss_data->rss_lut_size; lut++)
+ rss_data->rss_lut[lut] = indir[lut];
+ }
+
+ err = idpf_config_rss(vport);
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Report maximum of TX and RX. Report one extra channel to match our MailBox
+ * Queue.
+ */
+static void idpf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ u16 num_txq, num_rxq;
+ u16 combined;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+
+ num_txq = vport_config->user_config.num_req_tx_qs;
+ num_rxq = vport_config->user_config.num_req_rx_qs;
+
+ combined = min(num_txq, num_rxq);
+
+ /* Report maximum channels */
+ ch->max_combined = min_t(u16, vport_config->max_q.max_txq,
+ vport_config->max_q.max_rxq);
+ ch->max_rx = vport_config->max_q.max_rxq;
+ ch->max_tx = vport_config->max_q.max_txq;
+
+ ch->max_other = IDPF_MAX_MBXQ;
+ ch->other_count = IDPF_MAX_MBXQ;
+
+ ch->combined_count = combined;
+ ch->rx_count = num_rxq - combined;
+ ch->tx_count = num_txq - combined;
+}
+
+/**
+ * idpf_set_channels: set the new channel count
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * Negotiate a new number of channels with CP. Returns 0 on success, negative
+ * on failure.
+ */
+static int idpf_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct idpf_vport_config *vport_config;
+ u16 combined, num_txq, num_rxq;
+ unsigned int num_req_tx_q;
+ unsigned int num_req_rx_q;
+ struct idpf_vport *vport;
+ struct device *dev;
+ int err = 0;
+ u16 idx;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ idx = vport->idx;
+ vport_config = vport->adapter->vport_config[idx];
+
+ num_txq = vport_config->user_config.num_req_tx_qs;
+ num_rxq = vport_config->user_config.num_req_rx_qs;
+
+ combined = min(num_txq, num_rxq);
+
+ /* these checks are for cases where user didn't specify a particular
+ * value on cmd line but we get non-zero value anyway via
+ * get_channels(); look at ethtool.c in ethtool repository (the user
+ * space part), particularly, do_schannels() routine
+ */
+ if (ch->combined_count == combined)
+ ch->combined_count = 0;
+ if (ch->combined_count && ch->rx_count == num_rxq - combined)
+ ch->rx_count = 0;
+ if (ch->combined_count && ch->tx_count == num_txq - combined)
+ ch->tx_count = 0;
+
+ num_req_tx_q = ch->combined_count + ch->tx_count;
+ num_req_rx_q = ch->combined_count + ch->rx_count;
+
+ dev = &vport->adapter->pdev->dev;
+ /* It's possible to specify a number of queues that exceeds the max.
+ * Stack checks max combined_count and max [tx|rx]_count but not the
+ * max combined_count + [tx|rx]_count. These checks should catch that.
+ */
+ if (num_req_tx_q > vport_config->max_q.max_txq) {
+ dev_info(dev, "Maximum TX queues is %d\n",
+ vport_config->max_q.max_txq);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+ if (num_req_rx_q > vport_config->max_q.max_rxq) {
+ dev_info(dev, "Maximum RX queues is %d\n",
+ vport_config->max_q.max_rxq);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (num_req_tx_q == num_txq && num_req_rx_q == num_rxq)
+ goto unlock_mutex;
+
+ vport_config->user_config.num_req_tx_qs = num_req_tx_q;
+ vport_config->user_config.num_req_rx_qs = num_req_rx_q;
+
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (err) {
+ /* roll back queue change */
+ vport_config->user_config.num_req_tx_qs = num_txq;
+ vport_config->user_config.num_req_rx_qs = num_rxq;
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_get_ringparam - Get ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ * @kring: unused
+ * @ext_ack: unused
+ *
+ * Returns current ring parameters. TX and RX rings are reported separately,
+ * but the number of rings is not reported.
+ */
+static void idpf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ ring->rx_max_pending = IDPF_MAX_RXQ_DESC;
+ ring->tx_max_pending = IDPF_MAX_TXQ_DESC;
+ ring->rx_pending = vport->rxq_desc_count;
+ ring->tx_pending = vport->txq_desc_count;
+
+ idpf_vport_ctrl_unlock(netdev);
+}
+
+/**
+ * idpf_set_ringparam - Set ring parameters
+ * @netdev: network interface device structure
+ * @ring: ethtool ringparam structure
+ * @kring: unused
+ * @ext_ack: unused
+ *
+ * Sets ring parameters. TX and RX rings are controlled separately, but the
+ * number of rings is not specified, so all rings get the same settings.
+ */
+static int idpf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kring,
+ struct netlink_ext_ack *ext_ack)
+{
+ struct idpf_vport_user_config_data *config_data;
+ u32 new_rx_count, new_tx_count;
+ struct idpf_vport *vport;
+ int i, err = 0;
+ u16 idx;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ idx = vport->idx;
+
+ if (ring->tx_pending < IDPF_MIN_TXQ_DESC) {
+ netdev_err(netdev, "Descriptors requested (Tx: %u) is less than min supported (%u)\n",
+ ring->tx_pending,
+ IDPF_MIN_TXQ_DESC);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (ring->rx_pending < IDPF_MIN_RXQ_DESC) {
+ netdev_err(netdev, "Descriptors requested (Rx: %u) is less than min supported (%u)\n",
+ ring->rx_pending,
+ IDPF_MIN_RXQ_DESC);
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ new_rx_count = ALIGN(ring->rx_pending, IDPF_REQ_RXQ_DESC_MULTIPLE);
+ if (new_rx_count != ring->rx_pending)
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %u\n",
+ new_rx_count);
+
+ new_tx_count = ALIGN(ring->tx_pending, IDPF_REQ_DESC_MULTIPLE);
+ if (new_tx_count != ring->tx_pending)
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %u\n",
+ new_tx_count);
+
+ if (new_tx_count == vport->txq_desc_count &&
+ new_rx_count == vport->rxq_desc_count)
+ goto unlock_mutex;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ config_data->num_req_txq_desc = new_tx_count;
+ config_data->num_req_rxq_desc = new_rx_count;
+
+ /* Since we adjusted the RX completion queue count, the RX buffer queue
+ * descriptor count needs to be adjusted as well
+ */
+ for (i = 0; i < vport->num_bufqs_per_qgrp; i++)
+ vport->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(new_rx_count,
+ vport->num_bufqs_per_qgrp);
+
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_Q_DESC_CHANGE);
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * struct idpf_stats - definition for an ethtool statistic
+ * @stat_string: statistic name to display in ethtool -S output
+ * @sizeof_stat: the sizeof() the stat, must be no greater than sizeof(u64)
+ * @stat_offset: offsetof() the stat from a base pointer
+ *
+ * This structure defines a statistic to be added to the ethtool stats buffer.
+ * It defines a statistic as offset from a common base pointer. Stats should
+ * be defined in constant arrays using the IDPF_STAT macro, with every element
+ * of the array using the same _type for calculating the sizeof_stat and
+ * stat_offset.
+ *
+ * The @sizeof_stat is expected to be sizeof(u8), sizeof(u16), sizeof(u32) or
+ * sizeof(u64). Other sizes are not expected and will produce a WARN_ONCE from
+ * the idpf_add_ethtool_stat() helper function.
+ *
+ * The @stat_string is interpreted as a format string, allowing formatted
+ * values to be inserted while looping over multiple structures for a given
+ * statistics array. Thus, every statistic string in an array should have the
+ * same type and number of format specifiers, to be formatted by variadic
+ * arguments to the idpf_add_stat_string() helper function.
+ */
+struct idpf_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+/* Helper macro to define an idpf_stat structure with proper size and type.
+ * Use this when defining constant statistics arrays. Note that @_type expects
+ * only a type name and is used multiple times.
+ */
+#define IDPF_STAT(_type, _name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = sizeof_field(_type, _stat), \
+ .stat_offset = offsetof(_type, _stat) \
+}
+
+/* Helper macro for defining some statistics related to queues */
+#define IDPF_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_queue, _name, _stat)
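+
+/* For reference (illustrative expansion): IDPF_QUEUE_STAT("pkts",
+ * q_stats.tx.packets) becomes
+ * { .stat_string = "pkts",
+ *   .sizeof_stat = sizeof_field(struct idpf_queue, q_stats.tx.packets),
+ *   .stat_offset = offsetof(struct idpf_queue, q_stats.tx.packets) }
+ */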
+
+/* Stats associated with a Tx queue */
+static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
+ IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
+ IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
+ IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+};
+
+/* Stats associated with an Rx queue */
+static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
+ IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
+ IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
+ IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+};
+
+#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
+#define IDPF_RX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_rx_queue_stats)
+
+#define IDPF_PORT_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_vport, _name, _stat)
+
+static const struct idpf_stats idpf_gstrings_port_stats[] = {
+ IDPF_PORT_STAT("rx-csum_errors", port_stats.rx_hw_csum_err),
+ IDPF_PORT_STAT("rx-hsplit", port_stats.rx_hsplit),
+ IDPF_PORT_STAT("rx-hsplit_hbo", port_stats.rx_hsplit_hbo),
+ IDPF_PORT_STAT("rx-bad_descs", port_stats.rx_bad_descs),
+ IDPF_PORT_STAT("tx-skb_drops", port_stats.tx_drops),
+ IDPF_PORT_STAT("tx-dma_map_errs", port_stats.tx_dma_map_errs),
+ IDPF_PORT_STAT("tx-linearized_pkts", port_stats.tx_linearize),
+ IDPF_PORT_STAT("tx-busy_events", port_stats.tx_busy),
+ IDPF_PORT_STAT("rx-unicast_pkts", port_stats.vport_stats.rx_unicast),
+ IDPF_PORT_STAT("rx-multicast_pkts", port_stats.vport_stats.rx_multicast),
+ IDPF_PORT_STAT("rx-broadcast_pkts", port_stats.vport_stats.rx_broadcast),
+ IDPF_PORT_STAT("rx-unknown_protocol", port_stats.vport_stats.rx_unknown_protocol),
+ IDPF_PORT_STAT("tx-unicast_pkts", port_stats.vport_stats.tx_unicast),
+ IDPF_PORT_STAT("tx-multicast_pkts", port_stats.vport_stats.tx_multicast),
+ IDPF_PORT_STAT("tx-broadcast_pkts", port_stats.vport_stats.tx_broadcast),
+};
+
+#define IDPF_PORT_STATS_LEN ARRAY_SIZE(idpf_gstrings_port_stats)
+
+/**
+ * __idpf_add_qstat_strings - copy stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @size: size of the stats array
+ * @type: stat type
+ * @idx: stat index
+ *
+ * Format and copy the strings described by stats into the buffer pointed at
+ * by p.
+ */
+static void __idpf_add_qstat_strings(u8 **p, const struct idpf_stats *stats,
+ const unsigned int size, const char *type,
+ unsigned int idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ ethtool_sprintf(p, "%s_q-%u_%s",
+ type, idx, stats[i].stat_string);
+}
+
+/**
+ * idpf_add_qstat_strings - Copy queue stat strings into ethtool buffer
+ * @p: ethtool supplied buffer
+ * @stats: stat definitions array
+ * @type: stat type
+ * @idx: stat idx
+ *
+ * Format and copy the strings described by the const static stats value into
+ * the buffer pointed at by p.
+ *
+ * The parameter @stats is evaluated twice, so parameters with side effects
+ * should be avoided. Additionally, stats must be an array such that
+ * ARRAY_SIZE can be called on it.
+ */
+#define idpf_add_qstat_strings(p, stats, type, idx) \
+ __idpf_add_qstat_strings(p, stats, ARRAY_SIZE(stats), type, idx)
+
+/**
+ * idpf_add_stat_strings - Copy port stat strings into ethtool buffer
+ * @p: ethtool buffer
+ * @stats: struct to copy from
+ * @size: size of stats array to copy from
+ */
+static void idpf_add_stat_strings(u8 **p, const struct idpf_stats *stats,
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++)
+ ethtool_sprintf(p, "%s", stats[i].stat_string);
+}
+
+/**
+ * idpf_get_stat_strings - Get stat strings
+ * @netdev: network interface device structure
+ * @data: buffer for string data
+ *
+ * Builds the statistics string table
+ */
+static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ unsigned int i;
+
+ idpf_add_stat_strings(&data, idpf_gstrings_port_stats,
+ IDPF_PORT_STATS_LEN);
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ /* It's critical that we always report a constant number of strings and
+ * that the strings are reported in the same order regardless of how
+ * many queues are actually in use.
+ */
+ for (i = 0; i < vport_config->max_q.max_txq; i++)
+ idpf_add_qstat_strings(&data, idpf_gstrings_tx_queue_stats,
+ "tx", i);
+
+ for (i = 0; i < vport_config->max_q.max_rxq; i++)
+ idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
+ "rx", i);
+
+ page_pool_ethtool_stats_get_strings(data);
+}
+
+/**
+ * idpf_get_strings - Get string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ * @data: buffer for string data
+ *
+ * Builds string tables for various string sets
+ */
+static void idpf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ idpf_get_stat_strings(netdev, data);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * idpf_get_sset_count - Get length of string set
+ * @netdev: network interface device structure
+ * @sset: id of string set
+ *
+ * Reports size of various string tables.
+ */
+static int idpf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ u16 max_txq, max_rxq;
+ unsigned int size;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ /* This size reported back here *must* be constant throughout the
+ * lifecycle of the netdevice, i.e. we must report the maximum length
+ * even for queues that don't technically exist. This is due to the
+ * fact that this userspace API uses three separate ioctl calls to get
+ * stats data but has no way to communicate back to userspace when that
+ * size has changed, which can typically happen as a result of changing
+ * number of queues. If the number/order of stats change in the middle
+ * of this call chain it will lead to userspace crashing/accessing bad
+ * data through buffer under/overflow.
+ */
+ max_txq = vport_config->max_q.max_txq;
+ max_rxq = vport_config->max_q.max_rxq;
+
+ size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+ (IDPF_RX_QUEUE_STATS_LEN * max_rxq);
+ size += page_pool_ethtool_stats_get_count();
+
+ return size;
+}
+
+/**
+ * idpf_add_one_ethtool_stat - copy the stat into the supplied buffer
+ * @data: location to store the stat value
+ * @pstat: old stat pointer to copy from
+ * @stat: the stat definition
+ *
+ * Copies the stat data defined by the pointer and stat structure pair into
+ * the memory supplied as data. If the pointer is null, data will be zero'd.
+ */
+static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+ const struct idpf_stats *stat)
+{
+ char *p;
+
+ if (!pstat) {
+ /* Ensure that the ethtool data buffer is zero'd for any stats
+ * which don't have a valid pointer.
+ */
+ *data = 0;
+ return;
+ }
+
+ p = (char *)pstat + stat->stat_offset;
+ switch (stat->sizeof_stat) {
+ case sizeof(u64):
+ *data = *((u64 *)p);
+ break;
+ case sizeof(u32):
+ *data = *((u32 *)p);
+ break;
+ case sizeof(u16):
+ *data = *((u16 *)p);
+ break;
+ case sizeof(u8):
+ *data = *((u8 *)p);
+ break;
+ default:
+ WARN_ONCE(1, "unexpected stat size for %s",
+ stat->stat_string);
+ *data = 0;
+ }
+}
+
+/**
+ * idpf_add_queue_stats - copy queue statistics into supplied buffer
+ * @data: ethtool stats buffer
+ * @q: the queue to copy
+ *
+ * Queue statistics must be copied while protected by u64_stats_fetch_begin,
+ * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
+ * are defined in idpf_gstrings_queue_stats. If the queue pointer is null,
+ * zero out the queue stat values and update the data pointer. Otherwise
+ * safely copy the stats from the queue into the supplied buffer and update
+ * the data pointer when finished.
+ *
+ * This function expects to be called while under rcu_read_lock().
+ */
+static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+{
+ const struct idpf_stats *stats;
+ unsigned int start;
+ unsigned int size;
+ unsigned int i;
+
+ if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ size = IDPF_RX_QUEUE_STATS_LEN;
+ stats = idpf_gstrings_rx_queue_stats;
+ } else {
+ size = IDPF_TX_QUEUE_STATS_LEN;
+ stats = idpf_gstrings_tx_queue_stats;
+ }
+
+ /* To avoid invalid statistics values, ensure that we keep retrying
+ * the copy until we get a consistent value according to
+ * u64_stats_fetch_retry.
+ */
+ do {
+ start = u64_stats_fetch_begin(&q->stats_sync);
+ for (i = 0; i < size; i++)
+ idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
+ } while (u64_stats_fetch_retry(&q->stats_sync, start));
+
+ /* Once we successfully copy the stats in, update the data pointer */
+ *data += size;
+}
+
+/**
+ * idpf_add_empty_queue_stats - Add stats for a non-existent queue
+ * @data: pointer to data buffer
+ * @qtype: type of data queue
+ *
+ * We must report a constant length of stats back to userspace regardless of
+ * how many queues are actually in use because stats collection happens over
+ * three separate ioctls and there's no way to notify userspace the size
+ * changed between those calls. This adds zeroed stats to the data buffer
+ * since we don't have a real queue to refer to for this stats slot.
+ */
+static void idpf_add_empty_queue_stats(u64 **data, u16 qtype)
+{
+ unsigned int i;
+ int stats_len;
+
+ if (qtype == VIRTCHNL2_QUEUE_TYPE_RX)
+ stats_len = IDPF_RX_QUEUE_STATS_LEN;
+ else
+ stats_len = IDPF_TX_QUEUE_STATS_LEN;
+
+ for (i = 0; i < stats_len; i++)
+ (*data)[i] = 0;
+ *data += stats_len;
+}
+
+/**
+ * idpf_add_port_stats - Copy port stats into ethtool buffer
+ * @vport: virtual port struct
+ * @data: ethtool buffer to copy into
+ */
+static void idpf_add_port_stats(struct idpf_vport *vport, u64 **data)
+{
+ unsigned int size = IDPF_PORT_STATS_LEN;
+ unsigned int start;
+ unsigned int i;
+
+ do {
+ start = u64_stats_fetch_begin(&vport->port_stats.stats_sync);
+ for (i = 0; i < size; i++)
+ idpf_add_one_ethtool_stat(&(*data)[i], vport,
+ &idpf_gstrings_port_stats[i]);
+ } while (u64_stats_fetch_retry(&vport->port_stats.stats_sync, start));
+
+ *data += size;
+}
+
+/**
+ * idpf_collect_queue_stats - accumulate various per queue stats
+ * into port level stats
+ * @vport: pointer to vport struct
+ */
+static void idpf_collect_queue_stats(struct idpf_vport *vport)
+{
+ struct idpf_port_stats *pstats = &vport->port_stats;
+ int i, j;
+
+ /* zero out port stats since they're actually tracked in per
+ * queue stats; this is only for reporting
+ */
+ u64_stats_update_begin(&pstats->stats_sync);
+ u64_stats_set(&pstats->rx_hw_csum_err, 0);
+ u64_stats_set(&pstats->rx_hsplit, 0);
+ u64_stats_set(&pstats->rx_hsplit_hbo, 0);
+ u64_stats_set(&pstats->rx_bad_descs, 0);
+ u64_stats_set(&pstats->tx_linearize, 0);
+ u64_stats_set(&pstats->tx_busy, 0);
+ u64_stats_set(&pstats->tx_drops, 0);
+ u64_stats_set(&pstats->tx_dma_map_errs, 0);
+ u64_stats_update_end(&pstats->stats_sync);
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rxq_grp->splitq.num_rxq_sets;
+ else
+ num_rxq = rxq_grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
+ struct idpf_rx_queue_stats *stats;
+ struct idpf_queue *rxq;
+ unsigned int start;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
+ else
+ rxq = rxq_grp->singleq.rxqs[j];
+
+ if (!rxq)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq->stats_sync);
+
+ stats = &rxq->q_stats.rx;
+ hw_csum_err = u64_stats_read(&stats->hw_csum_err);
+ hsplit = u64_stats_read(&stats->hsplit_pkts);
+ hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
+ bad_descs = u64_stats_read(&stats->bad_descs);
+ } while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+
+ u64_stats_update_begin(&pstats->stats_sync);
+ u64_stats_add(&pstats->rx_hw_csum_err, hw_csum_err);
+ u64_stats_add(&pstats->rx_hsplit, hsplit);
+ u64_stats_add(&pstats->rx_hsplit_hbo, hsplit_hbo);
+ u64_stats_add(&pstats->rx_bad_descs, bad_descs);
+ u64_stats_update_end(&pstats->stats_sync);
+ }
+ }
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < txq_grp->num_txq; j++) {
+ u64 linearize, qbusy, skb_drops, dma_map_errs;
+ struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue_stats *stats;
+ unsigned int start;
+
+ if (!txq)
+ continue;
+
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+
+ stats = &txq->q_stats.tx;
+ linearize = u64_stats_read(&stats->linearize);
+ qbusy = u64_stats_read(&stats->q_busy);
+ skb_drops = u64_stats_read(&stats->skb_drops);
+ dma_map_errs = u64_stats_read(&stats->dma_map_errs);
+ } while (u64_stats_fetch_retry(&txq->stats_sync, start));
+
+ u64_stats_update_begin(&pstats->stats_sync);
+ u64_stats_add(&pstats->tx_linearize, linearize);
+ u64_stats_add(&pstats->tx_busy, qbusy);
+ u64_stats_add(&pstats->tx_drops, skb_drops);
+ u64_stats_add(&pstats->tx_dma_map_errs, dma_map_errs);
+ u64_stats_update_end(&pstats->stats_sync);
+ }
+ }
+}
+
+/**
+ * idpf_get_ethtool_stats - report device statistics
+ * @netdev: network interface device structure
+ * @stats: ethtool statistics structure
+ * @data: pointer to data buffer
+ *
+ * All statistics are added to the data buffer as an array of u64.
+ */
+static void idpf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ struct page_pool_stats pp_stats = { };
+ struct idpf_vport *vport;
+ unsigned int total = 0;
+ unsigned int i, j;
+ bool is_splitq;
+ u16 qtype;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (np->state != __IDPF_VPORT_UP) {
+ idpf_vport_ctrl_unlock(netdev);
+
+ return;
+ }
+
+ rcu_read_lock();
+
+ idpf_collect_queue_stats(vport);
+ idpf_add_port_stats(vport, &data);
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ qtype = VIRTCHNL2_QUEUE_TYPE_TX;
+
+ for (j = 0; j < txq_grp->num_txq; j++, total++) {
+ struct idpf_queue *txq = txq_grp->txqs[j];
+
+ if (!txq)
+ idpf_add_empty_queue_stats(&data, qtype);
+ else
+ idpf_add_queue_stats(&data, txq);
+ }
+ }
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ /* It is critical we provide a constant number of stats back to
+ * userspace regardless of how many queues are actually in use because
+ * there is no way to inform userspace the size has changed between
+ * ioctl calls. This will fill in any missing stats with zero.
+ */
+ for (; total < vport_config->max_q.max_txq; total++)
+ idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_TX);
+ total = 0;
+
+ is_splitq = idpf_is_queue_model_split(vport->rxq_model);
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rxq_grp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ qtype = VIRTCHNL2_QUEUE_TYPE_RX;
+
+ if (is_splitq)
+ num_rxq = rxq_grp->splitq.num_rxq_sets;
+ else
+ num_rxq = rxq_grp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, total++) {
+ struct idpf_queue *rxq;
+
+ if (is_splitq)
+ rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
+ else
+ rxq = rxq_grp->singleq.rxqs[j];
+ if (!rxq)
+ idpf_add_empty_queue_stats(&data, qtype);
+ else
+ idpf_add_queue_stats(&data, rxq);
+
+ /* In splitq mode, don't get page pool stats here since
+ * the pools are attached to the buffer queues
+ */
+ if (is_splitq)
+ continue;
+
+ if (rxq)
+ page_pool_get_stats(rxq->pp, &pp_stats);
+ }
+ }
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_queue *rxbufq =
+ &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
+
+ page_pool_get_stats(rxbufq->pp, &pp_stats);
+ }
+ }
+
+ for (; total < vport_config->max_q.max_rxq; total++)
+ idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
+
+ page_pool_ethtool_stats_get(data, &pp_stats);
+
+ rcu_read_unlock();
+
+ idpf_vport_ctrl_unlock(netdev);
+}
+
+/**
+ * idpf_find_rxq - find rxq from q index
+ * @vport: virtual port associated with the queue
+ * @q_num: q index used to find queue
+ *
+ * returns pointer to rx queue
+ */
+static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+{
+ int q_grp, q_idx;
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ return vport->rxq_grps->singleq.rxqs[q_num];
+
+ q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
+ q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
+
+ return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+}
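+
+/* Illustration of the splitq mapping above, assuming purely for the sake of
+ * example that IDPF_DFLT_SPLITQ_RXQ_PER_GROUP were 4 (the real value is
+ * defined elsewhere in the driver headers): q_num 5 would then resolve to
+ * rxq_grps[1].splitq.rxq_sets[1]->rxq.
+ */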
+
+/**
+ * idpf_find_txq - find txq from q index
+ * @vport: virtual port associated with the queue
+ * @q_num: q index used to find queue
+ *
+ * returns pointer to tx queue
+ */
+static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+{
+ int q_grp;
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ return vport->txqs[q_num];
+
+ q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
+
+ return vport->txq_grps[q_grp].complq;
+}
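+
+/* Note that in splitq mode the queue returned above is the group's completion
+ * queue; its q_vector is what carries the Tx ITR for the whole group, so the
+ * coalesce get/set helpers in this file operate on that vector.
+ */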
+
+/**
+ * __idpf_get_q_coalesce - get ITR values for specific queue
+ * @ec: ethtool structure to fill with driver's coalesce settings
+ * @q: queue of Rx or Tx
+ */
+static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
+ struct idpf_queue *q)
+{
+ if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ ec->use_adaptive_rx_coalesce =
+ IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
+ ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+ } else {
+ ec->use_adaptive_tx_coalesce =
+ IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
+ ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+ }
+}
+
+/**
+ * idpf_get_q_coalesce - get ITR values for specific queue
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: coalesce settings to be filled
+ * @q_num: queue number/index to retrieve ITR/INTRL (coalesce) settings for
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_get_q_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ u32 q_num)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ if (q_num >= vport->num_rxq && q_num >= vport->num_txq) {
+ err = -EINVAL;
+ goto unlock_mutex;
+ }
+
+ if (q_num < vport->num_rxq)
+ __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+
+ if (q_num < vport->num_txq)
+ __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_get_coalesce - get ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: coalesce settings to be filled
+ * @kec: unused
+ * @extack: unused
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *extack)
+{
+ /* Return coalesce based on queue number zero */
+ return idpf_get_q_coalesce(netdev, ec, 0);
+}
+
+/**
+ * idpf_get_per_q_coalesce - get ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @q_num: queue for which the ITR values have to be retrieved
+ * @ec: coalesce settings to be filled
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ return idpf_get_q_coalesce(netdev, ec, q_num);
+}
+
+/**
+ * __idpf_set_q_coalesce - set ITR values for specific queue
+ * @ec: ethtool structure from user to update ITR settings
+ * @q: queue for which ITR values have to be set
+ * @is_rxq: is queue type rx
+ *
+ * Returns 0 on success, negative otherwise.
+ */
+static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
+ struct idpf_queue *q, bool is_rxq)
+{
+ u32 use_adaptive_coalesce, coalesce_usecs;
+ struct idpf_q_vector *qv = q->q_vector;
+ bool is_dim_ena = false;
+ u16 itr_val;
+
+ if (is_rxq) {
+ is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
+ use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
+ coalesce_usecs = ec->rx_coalesce_usecs;
+ itr_val = qv->rx_itr_value;
+ } else {
+ is_dim_ena = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
+ use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
+ coalesce_usecs = ec->tx_coalesce_usecs;
+ itr_val = qv->tx_itr_value;
+ }
+ if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
+ netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+
+ return -EINVAL;
+ }
+
+ if (is_dim_ena && use_adaptive_coalesce)
+ return 0;
+
+ if (coalesce_usecs > IDPF_ITR_MAX) {
+ netdev_err(q->vport->netdev,
+ "Invalid value, %d-usecs range is 0-%d\n",
+ coalesce_usecs, IDPF_ITR_MAX);
+
+ return -EINVAL;
+ }
+
+ if (coalesce_usecs % 2) {
+ coalesce_usecs--;
+ netdev_info(q->vport->netdev,
+ "HW only supports even ITR values, ITR rounded to %d\n",
+ coalesce_usecs);
+ }
+
+ if (is_rxq) {
+ qv->rx_itr_value = coalesce_usecs;
+ if (use_adaptive_coalesce) {
+ qv->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ } else {
+ qv->rx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, qv->rx_itr_value,
+ false);
+ }
+ } else {
+ qv->tx_itr_value = coalesce_usecs;
+ if (use_adaptive_coalesce) {
+ qv->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ } else {
+ qv->tx_intr_mode = !IDPF_ITR_DYNAMIC;
+ idpf_vport_intr_write_itr(qv, qv->tx_itr_value, true);
+ }
+ }
+
+ /* The static/dynamic ITR update will be taken care of when the
+ * interrupt fires
+ */
+ return 0;
+}
+
+/**
+ * idpf_set_q_coalesce - set ITR values for specific queue
+ * @vport: vport associated with the queue that needs updating
+ * @ec: coalesce settings to program the device with
+ * @q_num: update ITR/INTRL (coalesce) settings for this queue number/index
+ * @is_rxq: is queue type rx
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_set_q_coalesce(struct idpf_vport *vport,
+ struct ethtool_coalesce *ec,
+ int q_num, bool is_rxq)
+{
+ struct idpf_queue *q;
+
+ q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+
+ if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * idpf_set_coalesce - set ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @ec: coalesce settings to program the device with
+ * @kec: unused
+ * @extack: unused
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kec,
+ struct netlink_ext_ack *extack)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+ int i, err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (np->state != __IDPF_VPORT_UP)
+ goto unlock_mutex;
+
+ for (i = 0; i < vport->num_txq; i++) {
+ err = idpf_set_q_coalesce(vport, ec, i, false);
+ if (err)
+ goto unlock_mutex;
+ }
+
+ for (i = 0; i < vport->num_rxq; i++) {
+ err = idpf_set_q_coalesce(vport, ec, i, true);
+ if (err)
+ goto unlock_mutex;
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_set_per_q_coalesce - set ITR values as requested by user
+ * @netdev: pointer to the netdev associated with this query
+ * @q_num: queue for which the ITR values have to be set
+ * @ec: coalesce settings to program the device with
+ *
+ * Return 0 on success, and negative on failure
+ */
+static int idpf_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ err = idpf_set_q_coalesce(vport, ec, q_num, false);
+ if (err) {
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+ }
+
+ err = idpf_set_q_coalesce(vport, ec, q_num, true);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
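+
+/* For context: these handlers back ethtool coalesce requests, e.g.
+ *   ethtool -C <iface> adaptive-rx off rx-usecs 50            (all queues)
+ *   ethtool --per-queue <iface> queue_mask 0x1 --coalesce tx-usecs 100
+ * (single queue); odd microsecond values are rounded down as noted above.
+ */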
+
+/**
+ * idpf_get_msglevel - Get debug message level
+ * @netdev: network interface device structure
+ *
+ * Returns current debug message level.
+ */
+static u32 idpf_get_msglevel(struct net_device *netdev)
+{
+ struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
+
+ return adapter->msg_enable;
+}
+
+/**
+ * idpf_set_msglevel - Set debug message level
+ * @netdev: network interface device structure
+ * @data: message level
+ *
+ * Set current debug message level. Higher values cause the driver to
+ * be noisier.
+ */
+static void idpf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
+
+ adapter->msg_enable = data;
+}
+
+/**
+ * idpf_get_link_ksettings - Get Link Speed and Duplex settings
+ * @netdev: network interface device structure
+ * @cmd: ethtool command
+ *
+ * Reports speed/duplex settings.
+ **/
+static int idpf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct idpf_vport *vport;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ ethtool_link_ksettings_zero_link_mode(cmd, supported);
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = PORT_NONE;
+ if (vport->link_up) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = vport->link_speed_mbps;
+ } else {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ }
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
+static const struct ethtool_ops idpf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE,
+ .get_msglevel = idpf_get_msglevel,
+ .set_msglevel = idpf_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = idpf_get_coalesce,
+ .set_coalesce = idpf_set_coalesce,
+ .get_per_queue_coalesce = idpf_get_per_q_coalesce,
+ .set_per_queue_coalesce = idpf_set_per_q_coalesce,
+ .get_ethtool_stats = idpf_get_ethtool_stats,
+ .get_strings = idpf_get_strings,
+ .get_sset_count = idpf_get_sset_count,
+ .get_channels = idpf_get_channels,
+ .get_rxnfc = idpf_get_rxnfc,
+ .get_rxfh_key_size = idpf_get_rxfh_key_size,
+ .get_rxfh_indir_size = idpf_get_rxfh_indir_size,
+ .get_rxfh = idpf_get_rxfh,
+ .set_rxfh = idpf_set_rxfh,
+ .set_channels = idpf_set_channels,
+ .get_ringparam = idpf_get_ringparam,
+ .set_ringparam = idpf_set_ringparam,
+ .get_link_ksettings = idpf_get_link_ksettings,
+};
+
+/**
+ * idpf_set_ethtool_ops - Initialize ethtool ops struct
+ * @netdev: network interface device structure
+ *
+ * Sets ethtool ops struct in our netdev so that ethtool can call
+ * our functions.
+ */
+void idpf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &idpf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
new file mode 100644
index 000000000000..24edb8a6ec2e
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_pf_regs.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_LAN_PF_REGS_H_
+#define _IDPF_LAN_PF_REGS_H_
+
+/* Receive queues */
+#define PF_QRX_BASE 0x00000000
+#define PF_QRX_TAIL(_QRX) (PF_QRX_BASE + (((_QRX) * 0x1000)))
+#define PF_QRX_BUFFQ_BASE 0x03000000
+#define PF_QRX_BUFFQ_TAIL(_QRX) (PF_QRX_BUFFQ_BASE + (((_QRX) * 0x1000)))
+
+/* Transmit queues */
+#define PF_QTX_BASE 0x05000000
+#define PF_QTX_COMM_DBELL(_DBQM) (PF_QTX_BASE + ((_DBQM) * 0x1000))
+
+/* Control(PF Mailbox) Queue */
+#define PF_FW_BASE 0x08400000
+
+#define PF_FW_ARQBAL (PF_FW_BASE)
+#define PF_FW_ARQBAH (PF_FW_BASE + 0x4)
+#define PF_FW_ARQLEN (PF_FW_BASE + 0x8)
+#define PF_FW_ARQLEN_ARQLEN_S 0
+#define PF_FW_ARQLEN_ARQLEN_M GENMASK(12, 0)
+#define PF_FW_ARQLEN_ARQVFE_S 28
+#define PF_FW_ARQLEN_ARQVFE_M BIT(PF_FW_ARQLEN_ARQVFE_S)
+#define PF_FW_ARQLEN_ARQOVFL_S 29
+#define PF_FW_ARQLEN_ARQOVFL_M BIT(PF_FW_ARQLEN_ARQOVFL_S)
+#define PF_FW_ARQLEN_ARQCRIT_S 30
+#define PF_FW_ARQLEN_ARQCRIT_M BIT(PF_FW_ARQLEN_ARQCRIT_S)
+#define PF_FW_ARQLEN_ARQENABLE_S 31
+#define PF_FW_ARQLEN_ARQENABLE_M BIT(PF_FW_ARQLEN_ARQENABLE_S)
+#define PF_FW_ARQH (PF_FW_BASE + 0xC)
+#define PF_FW_ARQH_ARQH_S 0
+#define PF_FW_ARQH_ARQH_M GENMASK(12, 0)
+#define PF_FW_ARQT (PF_FW_BASE + 0x10)
+
+#define PF_FW_ATQBAL (PF_FW_BASE + 0x14)
+#define PF_FW_ATQBAH (PF_FW_BASE + 0x18)
+#define PF_FW_ATQLEN (PF_FW_BASE + 0x1C)
+#define PF_FW_ATQLEN_ATQLEN_S 0
+#define PF_FW_ATQLEN_ATQLEN_M GENMASK(9, 0)
+#define PF_FW_ATQLEN_ATQVFE_S 28
+#define PF_FW_ATQLEN_ATQVFE_M BIT(PF_FW_ATQLEN_ATQVFE_S)
+#define PF_FW_ATQLEN_ATQOVFL_S 29
+#define PF_FW_ATQLEN_ATQOVFL_M BIT(PF_FW_ATQLEN_ATQOVFL_S)
+#define PF_FW_ATQLEN_ATQCRIT_S 30
+#define PF_FW_ATQLEN_ATQCRIT_M BIT(PF_FW_ATQLEN_ATQCRIT_S)
+#define PF_FW_ATQLEN_ATQENABLE_S 31
+#define PF_FW_ATQLEN_ATQENABLE_M BIT(PF_FW_ATQLEN_ATQENABLE_S)
+#define PF_FW_ATQH (PF_FW_BASE + 0x20)
+#define PF_FW_ATQH_ATQH_S 0
+#define PF_FW_ATQH_ATQH_M GENMASK(9, 0)
+#define PF_FW_ATQT (PF_FW_BASE + 0x24)
+
+/* Interrupts */
+#define PF_GLINT_BASE 0x08900000
+#define PF_GLINT_DYN_CTL(_INT) (PF_GLINT_BASE + ((_INT) * 0x1000))
+#define PF_GLINT_DYN_CTL_INTENA_S 0
+#define PF_GLINT_DYN_CTL_INTENA_M BIT(PF_GLINT_DYN_CTL_INTENA_S)
+#define PF_GLINT_DYN_CTL_CLEARPBA_S 1
+#define PF_GLINT_DYN_CTL_CLEARPBA_M BIT(PF_GLINT_DYN_CTL_CLEARPBA_S)
+#define PF_GLINT_DYN_CTL_SWINT_TRIG_S 2
+#define PF_GLINT_DYN_CTL_SWINT_TRIG_M BIT(PF_GLINT_DYN_CTL_SWINT_TRIG_S)
+#define PF_GLINT_DYN_CTL_ITR_INDX_S 3
+#define PF_GLINT_DYN_CTL_ITR_INDX_M GENMASK(4, 3)
+#define PF_GLINT_DYN_CTL_INTERVAL_S 5
+#define PF_GLINT_DYN_CTL_INTERVAL_M BIT(PF_GLINT_DYN_CTL_INTERVAL_S)
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S 24
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_ENA_S)
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_S 25
+#define PF_GLINT_DYN_CTL_SW_ITR_INDX_M BIT(PF_GLINT_DYN_CTL_SW_ITR_INDX_S)
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_S 30
+#define PF_GLINT_DYN_CTL_WB_ON_ITR_M BIT(PF_GLINT_DYN_CTL_WB_ON_ITR_S)
+#define PF_GLINT_DYN_CTL_INTENA_MSK_S 31
+#define PF_GLINT_DYN_CTL_INTENA_MSK_M BIT(PF_GLINT_DYN_CTL_INTENA_MSK_S)
+/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is
+ * spacing b/w itrn registers of the same vector.
+ */
+#define PF_GLINT_ITR_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
+ ((_reg_start) + ((_ITR) * (_itrn_indx_spacing)))
+/* For PF, itrn_indx_spacing is 4 and itrn_reg_spacing is 0x1000 */
+#define PF_GLINT_ITR(_ITR, _INT) \
+ (PF_GLINT_BASE + (((_ITR) + 1) * 4) + ((_INT) * 0x1000))
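+/* e.g. PF_GLINT_ITR(0, 2) expands to 0x08900000 + 4 + 0x2000 = 0x08902004 */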
+#define PF_GLINT_ITR_MAX_INDEX 2
+#define PF_GLINT_ITR_INTERVAL_S 0
+#define PF_GLINT_ITR_INTERVAL_M GENMASK(11, 0)
+
+/* Generic registers */
+#define PF_INT_DIR_OICR_ENA 0x08406000
+#define PF_INT_DIR_OICR_ENA_S 0
+#define PF_INT_DIR_OICR_ENA_M GENMASK(31, 0)
+#define PF_INT_DIR_OICR 0x08406004
+#define PF_INT_DIR_OICR_TSYN_EVNT 0
+#define PF_INT_DIR_OICR_PHY_TS_0 BIT(1)
+#define PF_INT_DIR_OICR_PHY_TS_1 BIT(2)
+#define PF_INT_DIR_OICR_CAUSE 0x08406008
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_S 0
+#define PF_INT_DIR_OICR_CAUSE_CAUSE_M GENMASK(31, 0)
+#define PF_INT_PBA_CLEAR 0x0840600C
+
+#define PF_FUNC_RID 0x08406010
+#define PF_FUNC_RID_FUNCTION_NUMBER_S 0
+#define PF_FUNC_RID_FUNCTION_NUMBER_M GENMASK(2, 0)
+#define PF_FUNC_RID_DEVICE_NUMBER_S 3
+#define PF_FUNC_RID_DEVICE_NUMBER_M GENMASK(7, 3)
+#define PF_FUNC_RID_BUS_NUMBER_S 8
+#define PF_FUNC_RID_BUS_NUMBER_M GENMASK(15, 8)
+
+/* Reset registers */
+#define PFGEN_RTRIG 0x08407000
+#define PFGEN_RTRIG_CORER_S 0
+#define PFGEN_RTRIG_CORER_M BIT(0)
+#define PFGEN_RTRIG_LINKR_S 1
+#define PFGEN_RTRIG_LINKR_M BIT(1)
+#define PFGEN_RTRIG_IMCR_S 2
+#define PFGEN_RTRIG_IMCR_M BIT(2)
+#define PFGEN_RSTAT 0x08407008 /* PFR Status */
+#define PFGEN_RSTAT_PFR_STATE_S 0
+#define PFGEN_RSTAT_PFR_STATE_M GENMASK(1, 0)
+#define PFGEN_CTRL 0x0840700C
+#define PFGEN_CTRL_PFSWR BIT(0)
+
+#endif
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
new file mode 100644
index 000000000000..a5752dcab888
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -0,0 +1,293 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_LAN_TXRX_H_
+#define _IDPF_LAN_TXRX_H_
+
+enum idpf_rss_hash {
+ IDPF_HASH_INVALID = 0,
+ /* Values 1 - 28 are reserved for future use */
+ IDPF_HASH_NONF_UNICAST_IPV4_UDP = 29,
+ IDPF_HASH_NONF_MULTICAST_IPV4_UDP,
+ IDPF_HASH_NONF_IPV4_UDP,
+ IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK,
+ IDPF_HASH_NONF_IPV4_TCP,
+ IDPF_HASH_NONF_IPV4_SCTP,
+ IDPF_HASH_NONF_IPV4_OTHER,
+ IDPF_HASH_FRAG_IPV4,
+ /* Values 37-38 are reserved */
+ IDPF_HASH_NONF_UNICAST_IPV6_UDP = 39,
+ IDPF_HASH_NONF_MULTICAST_IPV6_UDP,
+ IDPF_HASH_NONF_IPV6_UDP,
+ IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK,
+ IDPF_HASH_NONF_IPV6_TCP,
+ IDPF_HASH_NONF_IPV6_SCTP,
+ IDPF_HASH_NONF_IPV6_OTHER,
+ IDPF_HASH_FRAG_IPV6,
+ IDPF_HASH_NONF_RSVD47,
+ IDPF_HASH_NONF_FCOE_OX,
+ IDPF_HASH_NONF_FCOE_RX,
+ IDPF_HASH_NONF_FCOE_OTHER,
+ /* Values 51-62 are reserved */
+ IDPF_HASH_L2_PAYLOAD = 63,
+
+ IDPF_HASH_MAX
+};
+
+/* Supported RSS offloads */
+#define IDPF_DEFAULT_RSS_HASH \
+ (BIT_ULL(IDPF_HASH_NONF_IPV4_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_SCTP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_TCP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_OTHER) | \
+ BIT_ULL(IDPF_HASH_FRAG_IPV4) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_TCP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_SCTP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_OTHER) | \
+ BIT_ULL(IDPF_HASH_FRAG_IPV6) | \
+ BIT_ULL(IDPF_HASH_L2_PAYLOAD))
+
+#define IDPF_DEFAULT_RSS_HASH_EXPANDED (IDPF_DEFAULT_RSS_HASH | \
+ BIT_ULL(IDPF_HASH_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(IDPF_HASH_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(IDPF_HASH_NONF_MULTICAST_IPV6_UDP))
+
+/* For idpf_splitq_base_tx_compl_desc */
+#define IDPF_TXD_COMPLQ_GEN_S 15
+#define IDPF_TXD_COMPLQ_GEN_M BIT_ULL(IDPF_TXD_COMPLQ_GEN_S)
+#define IDPF_TXD_COMPLQ_COMPL_TYPE_S 11
+#define IDPF_TXD_COMPLQ_COMPL_TYPE_M GENMASK_ULL(13, 11)
+#define IDPF_TXD_COMPLQ_QID_S 0
+#define IDPF_TXD_COMPLQ_QID_M GENMASK_ULL(9, 0)
+
+/* For base mode TX descriptors */
+
+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S 23
+#define IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_L4T_CS_S)
+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_S 19
+#define IDPF_TXD_CTX_QW0_TUNN_DECTTL_M \
+ (0xFULL << IDPF_TXD_CTX_QW0_TUNN_DECTTL_S)
+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_S 12
+#define IDPF_TXD_CTX_QW0_TUNN_NATLEN_M \
+ (0X7FULL << IDPF_TXD_CTX_QW0_TUNN_NATLEN_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S 11
+#define IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M \
+ BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_S)
+#define IDPF_TXD_CTX_EIP_NOINC_IPID_CONST \
+ IDPF_TXD_CTX_QW0_TUNN_EIP_NOINC_M
+#define IDPF_TXD_CTX_QW0_TUNN_NATT_S 9
+#define IDPF_TXD_CTX_QW0_TUNN_NATT_M (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_UDP_TUNNELING BIT_ULL(IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_GRE_TUNNELING (0x2ULL << IDPF_TXD_CTX_QW0_TUNN_NATT_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S 2
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M \
+ (0x3FULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_S)
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S 0
+#define IDPF_TXD_CTX_QW0_TUNN_EXT_IP_M \
+ (0x3ULL << IDPF_TXD_CTX_QW0_TUNN_EXT_IP_S)
+
+#define IDPF_TXD_CTX_QW1_MSS_S 50
+#define IDPF_TXD_CTX_QW1_MSS_M GENMASK_ULL(63, 50)
+#define IDPF_TXD_CTX_QW1_TSO_LEN_S 30
+#define IDPF_TXD_CTX_QW1_TSO_LEN_M GENMASK_ULL(47, 30)
+#define IDPF_TXD_CTX_QW1_CMD_S 4
+#define IDPF_TXD_CTX_QW1_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TXD_CTX_QW1_DTYPE_S 0
+#define IDPF_TXD_CTX_QW1_DTYPE_M GENMASK_ULL(3, 0)
+#define IDPF_TXD_QW1_L2TAG1_S 48
+#define IDPF_TXD_QW1_L2TAG1_M GENMASK_ULL(63, 48)
+#define IDPF_TXD_QW1_TX_BUF_SZ_S 34
+#define IDPF_TXD_QW1_TX_BUF_SZ_M GENMASK_ULL(47, 34)
+#define IDPF_TXD_QW1_OFFSET_S 16
+#define IDPF_TXD_QW1_OFFSET_M GENMASK_ULL(33, 16)
+#define IDPF_TXD_QW1_CMD_S 4
+#define IDPF_TXD_QW1_CMD_M GENMASK_ULL(15, 4)
+#define IDPF_TXD_QW1_DTYPE_S 0
+#define IDPF_TXD_QW1_DTYPE_M GENMASK_ULL(3, 0)
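+/* Illustrative sketch only: a base data descriptor's qw1 could be assembled
+ * from the masks above with FIELD_PREP() from <linux/bitfield.h>, e.g.
+ *   FIELD_PREP(IDPF_TXD_QW1_DTYPE_M, IDPF_TX_DESC_DTYPE_DATA) |
+ *   FIELD_PREP(IDPF_TXD_QW1_CMD_M, IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS) |
+ *   FIELD_PREP(IDPF_TXD_QW1_TX_BUF_SZ_M, size)
+ * where size is the buffer length in bytes.
+ */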
+
+/* TX Completion Descriptor Completion Types */
+#define IDPF_TXD_COMPLT_ITR_FLUSH 0
+/* Descriptor completion type 1 is reserved */
+#define IDPF_TXD_COMPLT_RS 2
+/* Descriptor completion type 3 is reserved */
+#define IDPF_TXD_COMPLT_RE 4
+#define IDPF_TXD_COMPLT_SW_MARKER 5
+
+enum idpf_tx_desc_dtype_value {
+ IDPF_TX_DESC_DTYPE_DATA = 0,
+ IDPF_TX_DESC_DTYPE_CTX = 1,
+ /* DTYPE 2 is reserved
+ * DTYPE 3 is free for future use
+ * DTYPE 4 is reserved
+ */
+ IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX = 5,
+ /* DTYPE 6 is reserved */
+ IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 = 7,
+ /* DTYPE 8, 9 are free for future use
+ * DTYPE 10 is reserved
+ * DTYPE 11 is free for future use
+ */
+ IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE = 12,
+ /* DTYPE 13, 14 are free for future use */
+
+ /* DESC_DONE - HW has completed write-back of descriptor */
+ IDPF_TX_DESC_DTYPE_DESC_DONE = 15,
+};
+
+enum idpf_tx_ctx_desc_cmd_bits {
+ IDPF_TX_CTX_DESC_TSO = 0x01,
+ IDPF_TX_CTX_DESC_TSYN = 0x02,
+ IDPF_TX_CTX_DESC_IL2TAG2 = 0x04,
+ IDPF_TX_CTX_DESC_RSVD = 0x08,
+ IDPF_TX_CTX_DESC_SWTCH_NOTAG = 0x00,
+ IDPF_TX_CTX_DESC_SWTCH_UPLINK = 0x10,
+ IDPF_TX_CTX_DESC_SWTCH_LOCAL = 0x20,
+ IDPF_TX_CTX_DESC_SWTCH_VSI = 0x30,
+ IDPF_TX_CTX_DESC_FILT_AU_EN = 0x40,
+ IDPF_TX_CTX_DESC_FILT_AU_EVICT = 0x80,
+ IDPF_TX_CTX_DESC_RSVD1 = 0xF00
+};
+
+enum idpf_tx_desc_len_fields {
+ /* Note: These are predefined bit offsets */
+ IDPF_TX_DESC_LEN_MACLEN_S = 0, /* 7 BITS */
+ IDPF_TX_DESC_LEN_IPLEN_S = 7, /* 7 BITS */
+ IDPF_TX_DESC_LEN_L4_LEN_S = 14 /* 4 BITS */
+};
+
+enum idpf_tx_base_desc_cmd_bits {
+ IDPF_TX_DESC_CMD_EOP = BIT(0),
+ IDPF_TX_DESC_CMD_RS = BIT(1),
+ /* only on VFs else RSVD */
+ IDPF_TX_DESC_CMD_ICRC = BIT(2),
+ IDPF_TX_DESC_CMD_IL2TAG1 = BIT(3),
+ IDPF_TX_DESC_CMD_RSVD1 = BIT(4),
+ IDPF_TX_DESC_CMD_IIPT_IPV6 = BIT(5),
+ IDPF_TX_DESC_CMD_IIPT_IPV4 = BIT(6),
+ IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM = GENMASK(6, 5),
+ IDPF_TX_DESC_CMD_RSVD2 = BIT(7),
+ IDPF_TX_DESC_CMD_L4T_EOFT_TCP = BIT(8),
+ IDPF_TX_DESC_CMD_L4T_EOFT_SCTP = BIT(9),
+ IDPF_TX_DESC_CMD_L4T_EOFT_UDP = GENMASK(9, 8),
+ IDPF_TX_DESC_CMD_RSVD3 = BIT(10),
+ IDPF_TX_DESC_CMD_RSVD4 = BIT(11),
+};
+
+/* Transmit descriptors */
+/* splitq tx buf, singleq tx buf and singleq compl desc */
+struct idpf_base_tx_desc {
+ __le64 buf_addr; /* Address of descriptor's data buf */
+ __le64 qw1; /* type_cmd_offset_bsz_l2tag1 */
+}; /* read used with buffer queues */
+
+struct idpf_splitq_tx_compl_desc {
+ /* qid=[10:0] comptype=[13:11] rsvd=[14] gen=[15] */
+ __le16 qid_comptype_gen;
+ union {
+ __le16 q_head; /* Queue head */
+ __le16 compl_tag; /* Completion tag */
+ } q_head_compl_tag;
+ u8 ts[3];
+ u8 rsvd; /* Reserved */
+}; /* writeback used with completion queues */
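+
+/* Illustrative decode using the IDPF_TXD_COMPLQ_* masks defined above:
+ *   u16 val = le16_to_cpu(desc->qid_comptype_gen);
+ *   completion type = FIELD_GET(IDPF_TXD_COMPLQ_COMPL_TYPE_M, val);
+ *   generation bit  = FIELD_GET(IDPF_TXD_COMPLQ_GEN_M, val);
+ */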
+
+/* Context descriptors */
+struct idpf_base_tx_ctx_desc {
+ struct {
+ __le32 tunneling_params;
+ __le16 l2tag2;
+ __le16 rsvd1;
+ } qw0;
+ __le64 qw1; /* type_cmd_tlen_mss/rt_hint */
+};
+
+/* Common cmd field defines for all desc except Flex Flow Scheduler (0x0C) */
+enum idpf_tx_flex_desc_cmd_bits {
+ IDPF_TX_FLEX_DESC_CMD_EOP = BIT(0),
+ IDPF_TX_FLEX_DESC_CMD_RS = BIT(1),
+ IDPF_TX_FLEX_DESC_CMD_RE = BIT(2),
+ IDPF_TX_FLEX_DESC_CMD_IL2TAG1 = BIT(3),
+ IDPF_TX_FLEX_DESC_CMD_DUMMY = BIT(4),
+ IDPF_TX_FLEX_DESC_CMD_CS_EN = BIT(5),
+ IDPF_TX_FLEX_DESC_CMD_FILT_AU_EN = BIT(6),
+ IDPF_TX_FLEX_DESC_CMD_FILT_AU_EVICT = BIT(7),
+};
+
+struct idpf_flex_tx_desc {
+ __le64 buf_addr; /* Packet buffer address */
+ struct {
+#define IDPF_FLEX_TXD_QW1_DTYPE_S 0
+#define IDPF_FLEX_TXD_QW1_DTYPE_M GENMASK(4, 0)
+#define IDPF_FLEX_TXD_QW1_CMD_S 5
+#define IDPF_FLEX_TXD_QW1_CMD_M GENMASK(15, 5)
+ __le16 cmd_dtype;
+ /* DTYPE=IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2 (0x07) */
+ struct {
+ __le16 l2tag1;
+ __le16 l2tag2;
+ } l2tags;
+ __le16 buf_size;
+ } qw1;
+};
+
+struct idpf_flex_tx_sched_desc {
+ __le64 buf_addr; /* Packet buffer address */
+
+ /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE_16B (0x0C) */
+ struct {
+ u8 cmd_dtype;
+#define IDPF_TXD_FLEX_FLOW_DTYPE_M GENMASK(4, 0)
+#define IDPF_TXD_FLEX_FLOW_CMD_EOP BIT(5)
+#define IDPF_TXD_FLEX_FLOW_CMD_CS_EN BIT(6)
+#define IDPF_TXD_FLEX_FLOW_CMD_RE BIT(7)
+
+ /* [23:23] Horizon Overflow bit, [22:0] timestamp */
+ u8 ts[3];
+#define IDPF_TXD_FLOW_SCH_HORIZON_OVERFLOW_M BIT(7)
+
+ __le16 compl_tag;
+ __le16 rxr_bufsize;
+#define IDPF_TXD_FLEX_FLOW_RXR BIT(14)
+#define IDPF_TXD_FLEX_FLOW_BUFSIZE_M GENMASK(13, 0)
+ } qw1;
+};
+
+/* Common cmd fields for all flex context descriptors
+ * Note: these defines already account for the 5 bit dtype in the cmd_dtype
+ * field
+ */
+enum idpf_tx_flex_ctx_desc_cmd_bits {
+ IDPF_TX_FLEX_CTX_DESC_CMD_TSO = BIT(5),
+ IDPF_TX_FLEX_CTX_DESC_CMD_TSYN_EN = BIT(6),
+ IDPF_TX_FLEX_CTX_DESC_CMD_L2TAG2 = BIT(7),
+ IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_UPLNK = BIT(9),
+ IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_LOCAL = BIT(10),
+ IDPF_TX_FLEX_CTX_DESC_CMD_SWTCH_TARGETVSI = GENMASK(10, 9),
+};
+
+/* Standard flex descriptor TSO context quad word */
+struct idpf_flex_tx_tso_ctx_qw {
+ __le32 flex_tlen;
+#define IDPF_TXD_FLEX_CTX_TLEN_M GENMASK(17, 0)
+#define IDPF_TXD_FLEX_TSO_CTX_FLEX_S 24
+ __le16 mss_rt;
+#define IDPF_TXD_FLEX_CTX_MSS_RT_M GENMASK(13, 0)
+ u8 hdr_len;
+ u8 flex;
+};
+
+struct idpf_flex_tx_ctx_desc {
+ /* DTYPE = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX (0x05) */
+ struct {
+ struct idpf_flex_tx_tso_ctx_qw qw0;
+ struct {
+ __le16 cmd_dtype;
+ u8 flex[6];
+ } qw1;
+ } tso;
+};
+#endif /* _IDPF_LAN_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h
new file mode 100644
index 000000000000..3d73b6c76863
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_vf_regs.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_LAN_VF_REGS_H_
+#define _IDPF_LAN_VF_REGS_H_
+
+/* Reset */
+#define VFGEN_RSTAT 0x00008800
+#define VFGEN_RSTAT_VFR_STATE_S 0
+#define VFGEN_RSTAT_VFR_STATE_M GENMASK(1, 0)
+
+/* Control(VF Mailbox) Queue */
+#define VF_BASE 0x00006000
+
+#define VF_ATQBAL (VF_BASE + 0x1C00)
+#define VF_ATQBAH (VF_BASE + 0x1800)
+#define VF_ATQLEN (VF_BASE + 0x0800)
+#define VF_ATQLEN_ATQLEN_S 0
+#define VF_ATQLEN_ATQLEN_M GENMASK(9, 0)
+#define VF_ATQLEN_ATQVFE_S 28
+#define VF_ATQLEN_ATQVFE_M BIT(VF_ATQLEN_ATQVFE_S)
+#define VF_ATQLEN_ATQOVFL_S 29
+#define VF_ATQLEN_ATQOVFL_M BIT(VF_ATQLEN_ATQOVFL_S)
+#define VF_ATQLEN_ATQCRIT_S 30
+#define VF_ATQLEN_ATQCRIT_M BIT(VF_ATQLEN_ATQCRIT_S)
+#define VF_ATQLEN_ATQENABLE_S 31
+#define VF_ATQLEN_ATQENABLE_M BIT(VF_ATQLEN_ATQENABLE_S)
+#define VF_ATQH (VF_BASE + 0x0400)
+#define VF_ATQH_ATQH_S 0
+#define VF_ATQH_ATQH_M GENMASK(9, 0)
+#define VF_ATQT (VF_BASE + 0x2400)
+
+#define VF_ARQBAL (VF_BASE + 0x0C00)
+#define VF_ARQBAH (VF_BASE)
+#define VF_ARQLEN (VF_BASE + 0x2000)
+#define VF_ARQLEN_ARQLEN_S 0
+#define VF_ARQLEN_ARQLEN_M GENMASK(9, 0)
+#define VF_ARQLEN_ARQVFE_S 28
+#define VF_ARQLEN_ARQVFE_M BIT(VF_ARQLEN_ARQVFE_S)
+#define VF_ARQLEN_ARQOVFL_S 29
+#define VF_ARQLEN_ARQOVFL_M BIT(VF_ARQLEN_ARQOVFL_S)
+#define VF_ARQLEN_ARQCRIT_S 30
+#define VF_ARQLEN_ARQCRIT_M BIT(VF_ARQLEN_ARQCRIT_S)
+#define VF_ARQLEN_ARQENABLE_S 31
+#define VF_ARQLEN_ARQENABLE_M BIT(VF_ARQLEN_ARQENABLE_S)
+#define VF_ARQH (VF_BASE + 0x1400)
+#define VF_ARQH_ARQH_S 0
+#define VF_ARQH_ARQH_M GENMASK(12, 0)
+#define VF_ARQT (VF_BASE + 0x1000)
+
+/* Transmit queues */
+#define VF_QTX_TAIL_BASE 0x00000000
+#define VF_QTX_TAIL(_QTX) (VF_QTX_TAIL_BASE + (_QTX) * 0x4)
+#define VF_QTX_TAIL_EXT_BASE 0x00040000
+#define VF_QTX_TAIL_EXT(_QTX) (VF_QTX_TAIL_EXT_BASE + ((_QTX) * 4))
+
+/* Receive queues */
+#define VF_QRX_TAIL_BASE 0x00002000
+#define VF_QRX_TAIL(_QRX) (VF_QRX_TAIL_BASE + ((_QRX) * 4))
+#define VF_QRX_TAIL_EXT_BASE 0x00050000
+#define VF_QRX_TAIL_EXT(_QRX) (VF_QRX_TAIL_EXT_BASE + ((_QRX) * 4))
+#define VF_QRXB_TAIL_BASE 0x00060000
+#define VF_QRXB_TAIL(_QRX) (VF_QRXB_TAIL_BASE + ((_QRX) * 4))
+
+/* Interrupts */
+#define VF_INT_DYN_CTL0 0x00005C00
+#define VF_INT_DYN_CTL0_INTENA_S 0
+#define VF_INT_DYN_CTL0_INTENA_M BIT(VF_INT_DYN_CTL0_INTENA_S)
+#define VF_INT_DYN_CTL0_ITR_INDX_S 3
+#define VF_INT_DYN_CTL0_ITR_INDX_M GENMASK(4, 3)
+#define VF_INT_DYN_CTLN(_INT) (0x00003800 + ((_INT) * 4))
+#define VF_INT_DYN_CTLN_EXT(_INT) (0x00070000 + ((_INT) * 4))
+#define VF_INT_DYN_CTLN_INTENA_S 0
+#define VF_INT_DYN_CTLN_INTENA_M BIT(VF_INT_DYN_CTLN_INTENA_S)
+#define VF_INT_DYN_CTLN_CLEARPBA_S 1
+#define VF_INT_DYN_CTLN_CLEARPBA_M BIT(VF_INT_DYN_CTLN_CLEARPBA_S)
+#define VF_INT_DYN_CTLN_SWINT_TRIG_S 2
+#define VF_INT_DYN_CTLN_SWINT_TRIG_M BIT(VF_INT_DYN_CTLN_SWINT_TRIG_S)
+#define VF_INT_DYN_CTLN_ITR_INDX_S 3
+#define VF_INT_DYN_CTLN_ITR_INDX_M GENMASK(4, 3)
+#define VF_INT_DYN_CTLN_INTERVAL_S 5
+#define VF_INT_DYN_CTLN_INTERVAL_M BIT(VF_INT_DYN_CTLN_INTERVAL_S)
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S 24
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_ENA_S)
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_S 25
+#define VF_INT_DYN_CTLN_SW_ITR_INDX_M BIT(VF_INT_DYN_CTLN_SW_ITR_INDX_S)
+#define VF_INT_DYN_CTLN_WB_ON_ITR_S 30
+#define VF_INT_DYN_CTLN_WB_ON_ITR_M BIT(VF_INT_DYN_CTLN_WB_ON_ITR_S)
+#define VF_INT_DYN_CTLN_INTENA_MSK_S 31
+#define VF_INT_DYN_CTLN_INTENA_MSK_M BIT(VF_INT_DYN_CTLN_INTENA_MSK_S)
+/* _ITR is ITR index, _INT is interrupt index, _itrn_indx_spacing is spacing
+ * b/w itrn registers of the same vector
+ */
+#define VF_INT_ITR0(_ITR) (0x00004C00 + ((_ITR) * 4))
+#define VF_INT_ITRN_ADDR(_ITR, _reg_start, _itrn_indx_spacing) \
+ ((_reg_start) + ((_ITR) * (_itrn_indx_spacing)))
+/* For VF with 16 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing
+ * is 0x40 and base register offset is 0x00002800
+ */
+#define VF_INT_ITRN(_INT, _ITR) \
+ (0x00002800 + ((_INT) * 4) + ((_ITR) * 0x40))
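+/* e.g. VF_INT_ITRN(3, 1) expands to 0x00002800 + 0xC + 0x40 = 0x0000284C */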
+/* For VF with 64 vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing
+ * is 0x100 and base register offset is 0x00002C00
+ */
+#define VF_INT_ITRN_64(_INT, _ITR) \
+ (0x00002C00 + ((_INT) * 4) + ((_ITR) * 0x100))
+/* For VF with 2k vector support, itrn_reg_spacing is 0x4, itrn_indx_spacing
+ * is 0x2000 and base register offset is 0x00072000
+ */
+#define VF_INT_ITRN_2K(_INT, _ITR) \
+ (0x00072000 + ((_INT) * 4) + ((_ITR) * 0x2000))
+#define VF_INT_ITRN_MAX_INDEX 2
+#define VF_INT_ITRN_INTERVAL_S 0
+#define VF_INT_ITRN_INTERVAL_M GENMASK(11, 0)
+#define VF_INT_PBA_CLEAR 0x00008900
+
+#define VF_INT_ICR0_ENA1 0x00005000
+#define VF_INT_ICR0_ENA1_ADMINQ_S 30
+#define VF_INT_ICR0_ENA1_ADMINQ_M BIT(VF_INT_ICR0_ENA1_ADMINQ_S)
+#define VF_INT_ICR0_ENA1_RSVD_S 31
+#define VF_INT_ICR01 0x00004800
+#define VF_QF_HENA(_i) (0x0000C400 + ((_i) * 4))
+#define VF_QF_HENA_MAX_INDX 1
+#define VF_QF_HKEY(_i) (0x0000CC00 + ((_i) * 4))
+#define VF_QF_HKEY_MAX_INDX 12
+#define VF_QF_HLUT(_i) (0x0000D000 + ((_i) * 4))
+#define VF_QF_HLUT_MAX_INDX 15
+#endif
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
new file mode 100644
index 000000000000..19809b0ddcd9
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -0,0 +1,2379 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+static const struct net_device_ops idpf_netdev_ops_splitq;
+static const struct net_device_ops idpf_netdev_ops_singleq;
+
+const char * const idpf_vport_vc_state_str[] = {
+ IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
+};
+
+/**
+ * idpf_init_vector_stack - Fill the MSIX vector stack with vector index
+ * @adapter: private data struct
+ *
+ * Return 0 on success, error on failure
+ */
+static int idpf_init_vector_stack(struct idpf_adapter *adapter)
+{
+ struct idpf_vector_lifo *stack;
+ u16 min_vec;
+ u32 i;
+
+ mutex_lock(&adapter->vector_lock);
+ min_vec = adapter->num_msix_entries - adapter->num_avail_msix;
+ stack = &adapter->vector_stack;
+ stack->size = adapter->num_msix_entries;
+ /* set the base and top to point at the start of the 'free pool' so
+ * the unused vectors can be distributed on an on-demand basis
+ */
+ stack->base = min_vec;
+ stack->top = min_vec;
+
+ stack->vec_idx = kcalloc(stack->size, sizeof(u16), GFP_KERNEL);
+ if (!stack->vec_idx) {
+ mutex_unlock(&adapter->vector_lock);
+
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < stack->size; i++)
+ stack->vec_idx[i] = i;
+
+ mutex_unlock(&adapter->vector_lock);
+
+ return 0;
+}
+
+/**
+ * idpf_deinit_vector_stack - zero out the MSIX vector stack
+ * @adapter: private data struct
+ */
+static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
+{
+ struct idpf_vector_lifo *stack;
+
+ mutex_lock(&adapter->vector_lock);
+ stack = &adapter->vector_stack;
+ kfree(stack->vec_idx);
+ stack->vec_idx = NULL;
+ mutex_unlock(&adapter->vector_lock);
+}
+
+/**
+ * idpf_mb_intr_rel_irq - Free the IRQ association with the OS
+ * @adapter: adapter structure
+ *
+ * This will also disable interrupt mode and queue up the mailbox task. The
+ * mailbox task will reschedule itself if not in interrupt mode.
+ */
+static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
+{
+ clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
+ free_irq(adapter->msix_entries[0].vector, adapter);
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+}
+
+/**
+ * idpf_intr_rel - Release interrupt capabilities and free memory
+ * @adapter: adapter to disable interrupts on
+ */
+void idpf_intr_rel(struct idpf_adapter *adapter)
+{
+ int err;
+
+ if (!adapter->msix_entries)
+ return;
+
+ idpf_mb_intr_rel_irq(adapter);
+ pci_free_irq_vectors(adapter->pdev);
+
+ err = idpf_send_dealloc_vectors_msg(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to deallocate vectors: %d\n", err);
+
+ idpf_deinit_vector_stack(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * idpf_mb_intr_clean - Interrupt handler for the mailbox
+ * @irq: interrupt number
+ * @data: pointer to the adapter structure
+ */
+static irqreturn_t idpf_mb_intr_clean(int __always_unused irq, void *data)
+{
+ struct idpf_adapter *adapter = (struct idpf_adapter *)data;
+
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * idpf_mb_irq_enable - Enable MSIX interrupt for the mailbox
+ * @adapter: adapter to get the hardware address for register write
+ */
+static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
+{
+ struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
+ u32 val;
+
+ val = intr->dyn_ctl_intena_m | intr->dyn_ctl_itridx_m;
+ writel(val, intr->dyn_ctl);
+ writel(intr->icr_ena_ctlq_m, intr->icr_ena);
+}
+
+/**
+ * idpf_mb_intr_req_irq - Request irq for the mailbox interrupt
+ * @adapter: adapter structure to pass to the mailbox irq handler
+ */
+static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
+{
+ struct idpf_q_vector *mb_vector = &adapter->mb_vector;
+ int irq_num, mb_vidx = 0, err;
+
+ irq_num = adapter->msix_entries[mb_vidx].vector;
+ mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ dev_driver_string(&adapter->pdev->dev),
+ "Mailbox", mb_vidx);
+ err = request_irq(irq_num, adapter->irq_mb_handler, 0,
+ mb_vector->name, adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "IRQ request for mailbox failed, error: %d\n", err);
+
+ return err;
+ }
+
+ set_bit(IDPF_MB_INTR_MODE, adapter->flags);
+
+ return 0;
+}
+
+/**
+ * idpf_set_mb_vec_id - Set vector index for mailbox
+ * @adapter: adapter structure to access the vector chunks
+ *
+ * The first vector id in the requested vector chunks from the CP is for
+ * the mailbox
+ */
+static void idpf_set_mb_vec_id(struct idpf_adapter *adapter)
+{
+ if (adapter->req_vec_chunks)
+ adapter->mb_vector.v_idx =
+ le16_to_cpu(adapter->caps.mailbox_vector_id);
+ else
+ adapter->mb_vector.v_idx = 0;
+}
+
+/**
+ * idpf_mb_intr_init - Initialize the mailbox interrupt
+ * @adapter: adapter structure to store the mailbox vector
+ */
+static int idpf_mb_intr_init(struct idpf_adapter *adapter)
+{
+ adapter->dev_ops.reg_ops.mb_intr_reg_init(adapter);
+ adapter->irq_mb_handler = idpf_mb_intr_clean;
+
+ return idpf_mb_intr_req_irq(adapter);
+}
+
+/**
+ * idpf_vector_lifo_push - push MSIX vector index onto stack
+ * @adapter: private data struct
+ * @vec_idx: vector index to store
+ */
+static int idpf_vector_lifo_push(struct idpf_adapter *adapter, u16 vec_idx)
+{
+ struct idpf_vector_lifo *stack = &adapter->vector_stack;
+
+ lockdep_assert_held(&adapter->vector_lock);
+
+ if (stack->top == stack->base) {
+ dev_err(&adapter->pdev->dev, "Exceeded the vector stack limit: %d\n",
+ stack->top);
+ return -EINVAL;
+ }
+
+ stack->vec_idx[--stack->top] = vec_idx;
+
+ return 0;
+}
+
+/**
+ * idpf_vector_lifo_pop - pop MSIX vector index from stack
+ * @adapter: private data struct
+ */
+static int idpf_vector_lifo_pop(struct idpf_adapter *adapter)
+{
+ struct idpf_vector_lifo *stack = &adapter->vector_stack;
+
+ lockdep_assert_held(&adapter->vector_lock);
+
+ if (stack->top == stack->size) {
+ dev_err(&adapter->pdev->dev, "No interrupt vectors are available to distribute!\n");
+
+ return -EINVAL;
+ }
+
+ return stack->vec_idx[stack->top++];
+}
+
+/**
+ * idpf_vector_stash - Store the vector indexes onto the stack
+ * @adapter: private data struct
+ * @q_vector_idxs: vector index array
+ * @vec_info: info related to the number of vectors
+ *
+ * This function is a no-op if there are no vector indexes to be stashed
+ */
+static void idpf_vector_stash(struct idpf_adapter *adapter, u16 *q_vector_idxs,
+ struct idpf_vector_info *vec_info)
+{
+ int i, base = 0;
+ u16 vec_idx;
+
+ lockdep_assert_held(&adapter->vector_lock);
+
+ if (!vec_info->num_curr_vecs)
+ return;
+
+ /* For default vports, no need to stash vectors allocated from the
+ * default pool onto the stack
+ */
+ if (vec_info->default_vport)
+ base = IDPF_MIN_Q_VEC;
+
+ for (i = vec_info->num_curr_vecs - 1; i >= base; i--) {
+ vec_idx = q_vector_idxs[i];
+ idpf_vector_lifo_push(adapter, vec_idx);
+ adapter->num_avail_msix++;
+ }
+}
+
+/**
+ * idpf_req_rel_vector_indexes - Request or release MSIX vector indexes
+ * @adapter: driver specific private structure
+ * @q_vector_idxs: vector index array
+ * @vec_info: info related to the number of vectors
+ *
+ * This is the core function to distribute the MSIX vectors acquired from the
+ * OS. It expects the caller to pass the number of vectors required as well as
+ * the number previously allocated. First, it stashes the previously allocated
+ * vector indexes onto the stack and then figures out whether it can allocate
+ * the requested vectors. It may sleep while acquiring the mutex lock. If the
+ * caller passes 0 as the number of requested vectors, this function just
+ * stashes the already allocated vectors and returns 0.
+ *
+ * Returns the actual number of vectors allocated on success, error value on
+ * failure. If 0 is returned, it implies the stack has no vectors to allocate,
+ * which is also a failure case for the caller.
+ */
+int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
+ u16 *q_vector_idxs,
+ struct idpf_vector_info *vec_info)
+{
+ u16 num_req_vecs, num_alloc_vecs = 0, max_vecs;
+ struct idpf_vector_lifo *stack;
+ int i, j, vecid;
+
+ mutex_lock(&adapter->vector_lock);
+ stack = &adapter->vector_stack;
+ num_req_vecs = vec_info->num_req_vecs;
+
+ /* Stash interrupt vector indexes onto the stack if required */
+ idpf_vector_stash(adapter, q_vector_idxs, vec_info);
+
+ if (!num_req_vecs)
+ goto rel_lock;
+
+ if (vec_info->default_vport) {
+ /* As IDPF_MIN_Q_VEC vectors per default vport are set aside in
+ * the default pool of the stack, use them for default vports
+ */
+ j = vec_info->index * IDPF_MIN_Q_VEC + IDPF_MBX_Q_VEC;
+ for (i = 0; i < IDPF_MIN_Q_VEC; i++) {
+ q_vector_idxs[num_alloc_vecs++] = stack->vec_idx[j++];
+ num_req_vecs--;
+ }
+ }
+
+ /* Find out if the stack has enough vectors to allocate */
+ max_vecs = min(adapter->num_avail_msix, num_req_vecs);
+
+ for (j = 0; j < max_vecs; j++) {
+ vecid = idpf_vector_lifo_pop(adapter);
+ q_vector_idxs[num_alloc_vecs++] = vecid;
+ }
+ adapter->num_avail_msix -= max_vecs;
+
+rel_lock:
+ mutex_unlock(&adapter->vector_lock);
+
+ return num_alloc_vecs;
+}
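+
+/* Worked example for the function above, assuming purely for illustration that
+ * IDPF_MBX_Q_VEC were 1 and IDPF_MIN_Q_VEC were 4 (both are defined elsewhere
+ * in the driver and may differ): default vport index 0 would be handed stack
+ * entries 1..4 from the reserved region below stack->base, and any remaining
+ * requested vectors would then be popped from the free pool, bounded by
+ * num_avail_msix.
+ */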
+
+/**
+ * idpf_intr_req - Request interrupt capabilities
+ * @adapter: adapter to enable interrupts on
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_intr_req(struct idpf_adapter *adapter)
+{
+ u16 default_vports = idpf_get_default_vports(adapter);
+ int num_q_vecs, total_vecs, num_vec_ids;
+ int min_vectors, v_actual, err;
+ unsigned int vector;
+ u16 *vecids;
+
+ total_vecs = idpf_get_reserved_vecs(adapter);
+ num_q_vecs = total_vecs - IDPF_MBX_Q_VEC;
+
+ err = idpf_send_alloc_vectors_msg(adapter, num_q_vecs);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate %d vectors: %d\n", num_q_vecs, err);
+
+ return -EAGAIN;
+ }
+
+ min_vectors = IDPF_MBX_Q_VEC + IDPF_MIN_Q_VEC * default_vports;
+ v_actual = pci_alloc_irq_vectors(adapter->pdev, min_vectors,
+ total_vecs, PCI_IRQ_MSIX);
+ if (v_actual < min_vectors) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate MSIX vectors: %d\n",
+ v_actual);
+ err = -EAGAIN;
+ goto send_dealloc_vecs;
+ }
+
+ adapter->msix_entries = kcalloc(v_actual, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
+ idpf_set_mb_vec_id(adapter);
+
+ vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ if (!vecids) {
+ err = -ENOMEM;
+ goto free_msix;
+ }
+
+ if (adapter->req_vec_chunks) {
+ struct virtchnl2_vector_chunks *vchunks;
+ struct virtchnl2_alloc_vectors *ac;
+
+ ac = adapter->req_vec_chunks;
+ vchunks = &ac->vchunks;
+
+ num_vec_ids = idpf_get_vec_ids(adapter, vecids, total_vecs,
+ vchunks);
+ if (num_vec_ids < v_actual) {
+ err = -EINVAL;
+ goto free_vecids;
+ }
+ } else {
+ int i;
+
+ for (i = 0; i < v_actual; i++)
+ vecids[i] = i;
+ }
+
+ for (vector = 0; vector < v_actual; vector++) {
+ adapter->msix_entries[vector].entry = vecids[vector];
+ adapter->msix_entries[vector].vector =
+ pci_irq_vector(adapter->pdev, vector);
+ }
+
+ adapter->num_req_msix = total_vecs;
+ adapter->num_msix_entries = v_actual;
+ /* 'num_avail_msix' is used to distribute excess vectors to the vports
+ * after accounting for the minimum vectors required for each default
+ * vport
+ */
+ adapter->num_avail_msix = v_actual - min_vectors;
+
+ /* Fill MSIX vector lifo stack with vector indexes */
+ err = idpf_init_vector_stack(adapter);
+ if (err)
+ goto free_vecids;
+
+ err = idpf_mb_intr_init(adapter);
+ if (err)
+ goto deinit_vec_stack;
+ idpf_mb_irq_enable(adapter);
+ kfree(vecids);
+
+ return 0;
+
+deinit_vec_stack:
+ idpf_deinit_vector_stack(adapter);
+free_vecids:
+ kfree(vecids);
+free_msix:
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+free_irq:
+ pci_free_irq_vectors(adapter->pdev);
+send_dealloc_vecs:
+ idpf_send_dealloc_vectors_msg(adapter);
+
+ return err;
+}
+
+/**
+ * idpf_find_mac_filter - Search filter list for specific mac filter
+ * @vconfig: Vport config structure
+ * @macaddr: The MAC address
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * mac_filter_list_lock.
+ **/
+static struct idpf_mac_filter *idpf_find_mac_filter(struct idpf_vport_config *vconfig,
+ const u8 *macaddr)
+{
+ struct idpf_mac_filter *f;
+
+ if (!macaddr)
+ return NULL;
+
+ list_for_each_entry(f, &vconfig->user_config.mac_filter_list, list) {
+ if (ether_addr_equal(macaddr, f->macaddr))
+ return f;
+ }
+
+ return NULL;
+}
+
+/**
+ * __idpf_del_mac_filter - Delete a MAC filter from the filter list
+ * @vport_config: Vport config structure
+ * @macaddr: The MAC address
+ *
+ * Returns 0 on success, error value on failure
+ **/
+static int __idpf_del_mac_filter(struct idpf_vport_config *vport_config,
+ const u8 *macaddr)
+{
+ struct idpf_mac_filter *f;
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ f = idpf_find_mac_filter(vport_config, macaddr);
+ if (f) {
+ list_del(&f->list);
+ kfree(f);
+ }
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+}
+
+/**
+ * idpf_del_mac_filter - Delete a MAC filter from the filter list
+ * @vport: Main vport structure
+ * @np: Netdev private structure
+ * @macaddr: The MAC address
+ * @async: Don't wait for return message
+ *
+ * Removes filter from list and if interface is up, tells hardware about the
+ * removed filter.
+ **/
+static int idpf_del_mac_filter(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ const u8 *macaddr, bool async)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ f = idpf_find_mac_filter(vport_config, macaddr);
+ if (f) {
+ f->remove = true;
+ } else {
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return -EINVAL;
+ }
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ if (np->state == __IDPF_VPORT_UP) {
+ int err;
+
+ err = idpf_add_del_mac_filters(vport, np, false, async);
+ if (err)
+ return err;
+ }
+
+ return __idpf_del_mac_filter(vport_config, macaddr);
+}
+
+/**
+ * __idpf_add_mac_filter - Add mac filter helper function
+ * @vport_config: Vport config structure
+ * @macaddr: Address to add
+ *
+ * Takes mac_filter_list_lock spinlock to add new filter to list.
+ */
+static int __idpf_add_mac_filter(struct idpf_vport_config *vport_config,
+ const u8 *macaddr)
+{
+ struct idpf_mac_filter *f;
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ f = idpf_find_mac_filter(vport_config, macaddr);
+ if (f) {
+ f->remove = false;
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+ }
+
+ f = kzalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f) {
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(f->macaddr, macaddr);
+ list_add_tail(&f->list, &vport_config->user_config.mac_filter_list);
+ f->add = true;
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+}
+
+/**
+ * idpf_add_mac_filter - Add a mac filter to the filter list
+ * @vport: Main vport structure
+ * @np: Netdev private structure
+ * @macaddr: The MAC address
+ * @async: Don't wait for return message
+ *
+ * Returns 0 on success or error on failure. If interface is up, we'll also
+ * send the virtchnl message to tell hardware about the filter.
+ **/
+static int idpf_add_mac_filter(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ const u8 *macaddr, bool async)
+{
+ struct idpf_vport_config *vport_config;
+ int err;
+
+ vport_config = np->adapter->vport_config[np->vport_idx];
+ err = __idpf_add_mac_filter(vport_config, macaddr);
+ if (err)
+ return err;
+
+ if (np->state == __IDPF_VPORT_UP)
+ err = idpf_add_del_mac_filters(vport, np, true, async);
+
+ return err;
+}
+
+/**
+ * idpf_del_all_mac_filters - Delete all MAC filters in list
+ * @vport: main vport struct
+ *
+ * Takes mac_filter_list_lock spinlock. Deletes all filters
+ */
+static void idpf_del_all_mac_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f, *ftmp;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ list_for_each_entry_safe(f, ftmp, &vport_config->user_config.mac_filter_list,
+ list) {
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+}
+
+/**
+ * idpf_restore_mac_filters - Re-add all MAC filters in list
+ * @vport: main vport struct
+ *
+ * Takes mac_filter_list_lock spinlock. Sets the add field to true for each
+ * filter so they are resynced back to HW.
+ */
+static void idpf_restore_mac_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
+ f->add = true;
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ true, false);
+}
+
+/**
+ * idpf_remove_mac_filters - Remove all MAC filters in list
+ * @vport: main vport struct
+ *
+ * Takes mac_filter_list_lock spinlock. Sets the remove field to true for each
+ * filter so they are removed from HW.
+ */
+static void idpf_remove_mac_filters(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list, list)
+ f->remove = true;
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ idpf_add_del_mac_filters(vport, netdev_priv(vport->netdev),
+ false, false);
+}
+
+/**
+ * idpf_deinit_mac_addr - deinitialize mac address for vport
+ * @vport: main vport structure
+ */
+static void idpf_deinit_mac_addr(struct idpf_vport *vport)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_mac_filter *f;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ f = idpf_find_mac_filter(vport_config, vport->default_mac_addr);
+ if (f) {
+ list_del(&f->list);
+ kfree(f);
+ }
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+}
+
+/**
+ * idpf_init_mac_addr - initialize mac address for vport
+ * @vport: main vport structure
+ * @netdev: pointer to netdev struct associated with this vport
+ */
+static int idpf_init_mac_addr(struct idpf_vport *vport,
+ struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ int err;
+
+ if (is_valid_ether_addr(vport->default_mac_addr)) {
+ eth_hw_addr_set(netdev, vport->default_mac_addr);
+ ether_addr_copy(netdev->perm_addr, vport->default_mac_addr);
+
+ return idpf_add_mac_filter(vport, np, vport->default_mac_addr,
+ false);
+ }
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_MACFILTER)) {
+ dev_err(&adapter->pdev->dev,
+ "MAC address is not provided and capability is not set\n");
+
+ return -EINVAL;
+ }
+
+ eth_hw_addr_random(netdev);
+ err = idpf_add_mac_filter(vport, np, netdev->dev_addr, false);
+ if (err)
+ return err;
+
+ dev_info(&adapter->pdev->dev, "Invalid MAC address %pM, using random %pM\n",
+ vport->default_mac_addr, netdev->dev_addr);
+ ether_addr_copy(vport->default_mac_addr, netdev->dev_addr);
+
+ return 0;
+}
+
+/**
+ * idpf_cfg_netdev - Allocate, configure and register a netdev
+ * @vport: main vport structure
+ *
+ * Returns 0 on success, negative value on failure.
+ */
+static int idpf_cfg_netdev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ netdev_features_t dflt_features;
+ netdev_features_t offloads = 0;
+ struct idpf_netdev_priv *np;
+ struct net_device *netdev;
+ u16 idx = vport->idx;
+ int err;
+
+ vport_config = adapter->vport_config[idx];
+
+ /* It's possible we already have a netdev allocated and registered for
+ * this vport
+ */
+ if (test_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags)) {
+ netdev = adapter->netdevs[idx];
+ np = netdev_priv(netdev);
+ np->vport = vport;
+ np->vport_idx = vport->idx;
+ np->vport_id = vport->vport_id;
+ vport->netdev = netdev;
+
+ return idpf_init_mac_addr(vport, netdev);
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct idpf_netdev_priv),
+ vport_config->max_q.max_txq,
+ vport_config->max_q.max_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ vport->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vport = vport;
+ np->adapter = adapter;
+ np->vport_idx = vport->idx;
+ np->vport_id = vport->vport_id;
+
+ spin_lock_init(&np->stats_lock);
+
+ err = idpf_init_mac_addr(vport, netdev);
+ if (err) {
+ free_netdev(vport->netdev);
+ vport->netdev = NULL;
+
+ return err;
+ }
+
+ /* assign netdev_ops */
+ if (idpf_is_queue_model_split(vport->txq_model))
+ netdev->netdev_ops = &idpf_netdev_ops_splitq;
+ else
+ netdev->netdev_ops = &idpf_netdev_ops_singleq;
+
+ /* set the watchdog timeout value to 5 seconds */
+ netdev->watchdog_timeo = 5 * HZ;
+
+ /* configure default MTU size */
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = vport->max_mtu;
+
+ dflt_features = NETIF_F_SG |
+ NETIF_F_HIGHDMA;
+
+ if (idpf_is_cap_ena_all(adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS))
+ dflt_features |= NETIF_F_RXHASH;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V4))
+ dflt_features |= NETIF_F_IP_CSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM_L4V6))
+ dflt_features |= NETIF_F_IPV6_CSUM;
+ if (idpf_is_cap_ena(adapter, IDPF_CSUM_CAPS, IDPF_CAP_RX_CSUM))
+ dflt_features |= NETIF_F_RXCSUM;
+ if (idpf_is_cap_ena_all(adapter, IDPF_CSUM_CAPS, IDPF_CAP_SCTP_CSUM))
+ dflt_features |= NETIF_F_SCTP_CRC;
+
+ if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV4_TCP))
+ dflt_features |= NETIF_F_TSO;
+ if (idpf_is_cap_ena(adapter, IDPF_SEG_CAPS, VIRTCHNL2_CAP_SEG_IPV6_TCP))
+ dflt_features |= NETIF_F_TSO6;
+ if (idpf_is_cap_ena_all(adapter, IDPF_SEG_CAPS,
+ VIRTCHNL2_CAP_SEG_IPV4_UDP |
+ VIRTCHNL2_CAP_SEG_IPV6_UDP))
+ dflt_features |= NETIF_F_GSO_UDP_L4;
+ if (idpf_is_cap_ena_all(adapter, IDPF_RSC_CAPS, IDPF_CAP_RSC))
+ offloads |= NETIF_F_GRO_HW;
+ /* advertise to the stack only if offloads for encapsulated packets are
+ * supported
+ */
+ if (idpf_is_cap_ena(vport->adapter, IDPF_SEG_CAPS,
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL)) {
+ offloads |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ 0;
+
+ if (!idpf_is_cap_ena_all(vport->adapter, IDPF_CSUM_CAPS,
+ IDPF_CAP_TUNNEL_TX_CSUM))
+ netdev->gso_partial_features |=
+ NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+ offloads |= NETIF_F_TSO_MANGLEID;
+ }
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_LOOPBACK))
+ offloads |= NETIF_F_LOOPBACK;
+
+ netdev->features |= dflt_features;
+ netdev->hw_features |= dflt_features | offloads;
+ netdev->hw_enc_features |= dflt_features | offloads;
+ idpf_set_ethtool_ops(netdev);
+ SET_NETDEV_DEV(netdev, &adapter->pdev->dev);
+
+ /* carrier off on init to avoid Tx hangs */
+ netif_carrier_off(netdev);
+
+ /* make sure transmit queues start off as stopped */
+ netif_tx_stop_all_queues(netdev);
+
+ /* The vport can be arbitrarily released so we need to also track
+ * netdevs in the adapter struct
+ */
+ adapter->netdevs[idx] = netdev;
+
+ return 0;
+}
+
+/**
+ * idpf_get_free_slot - get the next free (NULL) slot index in the vports array
+ * @adapter: adapter in which to look for a free vport slot
+ */
+static int idpf_get_free_slot(struct idpf_adapter *adapter)
+{
+ unsigned int i;
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->vports[i])
+ return i;
+ }
+
+ return IDPF_NO_FREE_SLOT;
+}
+
+/**
+ * idpf_remove_features - Turn off feature configs
+ * @vport: virtual port structure
+ */
+static void idpf_remove_features(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
+ idpf_remove_mac_filters(vport);
+}
+
+/**
+ * idpf_vport_stop - Disable a vport
+ * @vport: vport to disable
+ */
+static void idpf_vport_stop(struct idpf_vport *vport)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+
+ if (np->state <= __IDPF_VPORT_DOWN)
+ return;
+
+ netif_carrier_off(vport->netdev);
+ netif_tx_disable(vport->netdev);
+
+ idpf_send_disable_vport_msg(vport);
+ idpf_send_disable_queues_msg(vport);
+ idpf_send_map_unmap_queue_vector_msg(vport, false);
+ /* Normally we ask for queues in create_vport, but if the number of
+ * initially requested queues has changed, for example via ethtool
+ * set channels, we delete the queues and then add them back
+ * instead of deleting and reallocating the vport.
+ */
+ if (test_and_clear_bit(IDPF_VPORT_DEL_QUEUES, vport->flags))
+ idpf_send_delete_queues_msg(vport);
+
+ idpf_remove_features(vport);
+
+ vport->link_up = false;
+ idpf_vport_intr_deinit(vport);
+ idpf_vport_intr_rel(vport);
+ idpf_vport_queues_rel(vport);
+ np->state = __IDPF_VPORT_DOWN;
+}
+
+/**
+ * idpf_stop - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when an interface is de-activated by the OS,
+ * and the netdevice enters the DOWN state. The hardware is still under the
+ * driver's control, but the netdev interface is disabled.
+ *
+ * Returns success only - not allowed to fail
+ */
+static int idpf_stop(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport *vport;
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, np->adapter->flags))
+ return 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ idpf_vport_stop(vport);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return 0;
+}
+
+/**
+ * idpf_decfg_netdev - Unregister the netdev
+ * @vport: vport for which netdev to be unregistered
+ */
+static void idpf_decfg_netdev(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+
+ unregister_netdev(vport->netdev);
+ free_netdev(vport->netdev);
+ vport->netdev = NULL;
+
+ adapter->netdevs[vport->idx] = NULL;
+}
+
+/**
+ * idpf_vport_rel - Delete a vport and free its resources
+ * @vport: the vport being removed
+ */
+static void idpf_vport_rel(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct idpf_vector_info vec_info;
+ struct idpf_rss_data *rss_data;
+ struct idpf_vport_max_q max_q;
+ u16 idx = vport->idx;
+ int i;
+
+ vport_config = adapter->vport_config[vport->idx];
+ idpf_deinit_rss(vport);
+ rss_data = &vport_config->user_config.rss_data;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = NULL;
+
+ idpf_send_destroy_vport_msg(vport);
+
+ /* Set all bits as we don't know which vc_state the vport's vchnl_wq
+ * is waiting on, and wake up the virtchnl workqueue even if it is
+ * still waiting for a response, as we are going down
+ */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ set_bit(i, vport->vc_state);
+ wake_up(&vport->vchnl_wq);
+
+ mutex_destroy(&vport->vc_buf_lock);
+
+ /* Clear all the bits */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ clear_bit(i, vport->vc_state);
+
+ /* Release all max queues allocated to the adapter's pool */
+ max_q.max_rxq = vport_config->max_q.max_rxq;
+ max_q.max_txq = vport_config->max_q.max_txq;
+ max_q.max_bufq = vport_config->max_q.max_bufq;
+ max_q.max_complq = vport_config->max_q.max_complq;
+ idpf_vport_dealloc_max_qs(adapter, &max_q);
+
+ /* Release all the allocated vectors on the stack */
+ vec_info.num_req_vecs = 0;
+ vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.default_vport = vport->default_vport;
+
+ idpf_req_rel_vector_indexes(adapter, vport->q_vector_idxs, &vec_info);
+
+ kfree(vport->q_vector_idxs);
+ vport->q_vector_idxs = NULL;
+
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
+ if (adapter->vport_config[idx]) {
+ kfree(adapter->vport_config[idx]->req_qs_chunks);
+ adapter->vport_config[idx]->req_qs_chunks = NULL;
+ }
+ kfree(vport);
+ adapter->num_alloc_vports--;
+}
+
+/**
+ * idpf_vport_dealloc - cleanup and release a given vport
+ * @vport: pointer to idpf vport structure
+ *
+ * returns nothing
+ */
+static void idpf_vport_dealloc(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ unsigned int i = vport->idx;
+
+ idpf_deinit_mac_addr(vport);
+ idpf_vport_stop(vport);
+
+ if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ idpf_decfg_netdev(vport);
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ idpf_del_all_mac_filters(vport);
+
+ if (adapter->netdevs[i]) {
+ struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
+
+ np->vport = NULL;
+ }
+
+ idpf_vport_rel(vport);
+
+ adapter->vports[i] = NULL;
+ adapter->next_vport = idpf_get_free_slot(adapter);
+}
+
+/**
+ * idpf_vport_alloc - Allocates the next available struct vport in the adapter
+ * @adapter: board private structure
+ * @max_q: vport max queue info
+ *
+ * returns a pointer to a vport on success, NULL on failure.
+ */
+static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct idpf_rss_data *rss_data;
+ u16 idx = adapter->next_vport;
+ struct idpf_vport *vport;
+ u16 num_max_q;
+
+ if (idx == IDPF_NO_FREE_SLOT)
+ return NULL;
+
+ vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+ if (!vport)
+ return vport;
+
+ if (!adapter->vport_config[idx]) {
+ struct idpf_vport_config *vport_config;
+
+ vport_config = kzalloc(sizeof(*vport_config), GFP_KERNEL);
+ if (!vport_config) {
+ kfree(vport);
+
+ return NULL;
+ }
+
+ adapter->vport_config[idx] = vport_config;
+ }
+
+ vport->idx = idx;
+ vport->adapter = adapter;
+ vport->compln_clean_budget = IDPF_TX_COMPLQ_CLEAN_BUDGET;
+ vport->default_vport = adapter->num_alloc_vports <
+ idpf_get_default_vports(adapter);
+
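+ /* Reserve room for one vector index per queue, sized for whichever
+ * of TX or RX has more queues
+ */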
+ num_max_q = max(max_q->max_txq, max_q->max_rxq);
+ vport->q_vector_idxs = kcalloc(num_max_q, sizeof(u16), GFP_KERNEL);
+ if (!vport->q_vector_idxs) {
+ kfree(vport);
+
+ return NULL;
+ }
+ idpf_vport_init(vport, max_q);
+
+ /* This alloc is done separately from the LUT because it's not strictly
+ * dependent on how many queues we have. If we change number of queues
+ * and soft reset we'll need a new LUT but the key can remain the same
+ * for as long as the vport exists.
+ */
+ rss_data = &adapter->vport_config[idx]->user_config.rss_data;
+ rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ kfree(vport);
+
+ return NULL;
+ }
+ /* Initialize default rss key */
+ netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
+
+ /* fill vport slot in the adapter struct */
+ adapter->vports[idx] = vport;
+ adapter->vport_ids[idx] = idpf_get_vport_id(vport);
+
+ adapter->num_alloc_vports++;
+ /* prepare adapter->next_vport for next use */
+ adapter->next_vport = idpf_get_free_slot(adapter);
+
+ return vport;
+}
+
+/**
+ * idpf_get_stats64 - get statistics for network device structure
+ * @netdev: network interface device structure
+ * @stats: main device statistics structure
+ */
+static void idpf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ spin_lock_bh(&np->stats_lock);
+ *stats = np->netstats;
+ spin_unlock_bh(&np->stats_lock);
+}
+
+/**
+ * idpf_statistics_task - Delayed task to get statistics over mailbox
+ * @work: work_struct handle to our data
+ */
+void idpf_statistics_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+ int i;
+
+ adapter = container_of(work, struct idpf_adapter, stats_task.work);
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ struct idpf_vport *vport = adapter->vports[i];
+
+ if (vport && !test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ idpf_send_get_stats_msg(vport);
+ }
+
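+ /* Re-arm the stats polling roughly every ten seconds */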
+ queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
+ msecs_to_jiffies(10000));
+}
+
+/**
+ * idpf_mbx_task - Delayed task to handle mailbox responses
+ * @work: work_struct handle
+ */
+void idpf_mbx_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+
+ adapter = container_of(work, struct idpf_adapter, mbx_task.work);
+
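+ /* In mailbox interrupt mode re-enable the mailbox interrupt before
+ * processing; otherwise fall back to polling the mailbox every 300 ms
+ */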
+ if (test_bit(IDPF_MB_INTR_MODE, adapter->flags))
+ idpf_mb_irq_enable(adapter);
+ else
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
+ msecs_to_jiffies(300));
+
+ idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+}
+
+/**
+ * idpf_service_task - Periodic task that checks for hardware resets
+ * @work: work_struct handle to our data
+ */
+void idpf_service_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+
+ adapter = container_of(work, struct idpf_adapter, serv_task.work);
+
+ if (idpf_is_reset_detected(adapter) &&
+ !idpf_is_reset_in_prog(adapter) &&
+ !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
+ dev_info(&adapter->pdev->dev, "HW reset detected\n");
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+
+ queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
+ msecs_to_jiffies(300));
+}
+
+/**
+ * idpf_restore_features - Restore feature configs
+ * @vport: virtual port structure
+ */
+static void idpf_restore_features(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER))
+ idpf_restore_mac_filters(vport);
+}
+
+/**
+ * idpf_set_real_num_queues - set number of queues for netdev
+ * @vport: virtual port structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_set_real_num_queues(struct idpf_vport *vport)
+{
+ int err;
+
+ err = netif_set_real_num_rx_queues(vport->netdev, vport->num_rxq);
+ if (err)
+ return err;
+
+ return netif_set_real_num_tx_queues(vport->netdev, vport->num_txq);
+}
+
+/**
+ * idpf_up_complete - Complete interface up sequence
+ * @vport: virtual port structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_up_complete(struct idpf_vport *vport)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+
+ if (vport->link_up && !netif_carrier_ok(vport->netdev)) {
+ netif_carrier_on(vport->netdev);
+ netif_tx_start_all_queues(vport->netdev);
+ }
+
+ np->state = __IDPF_VPORT_UP;
+
+ return 0;
+}
+
+/**
+ * idpf_rx_init_buf_tail - Write initial buffer ring tail value
+ * @vport: virtual port struct
+ */
+static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
+{
+ int i, j;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *grp = &vport->rxq_grps[i];
+
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_queue *q =
+ &grp->splitq.bufq_sets[j].bufq;
+
+ writel(q->next_to_alloc, q->tail);
+ }
+ } else {
+ for (j = 0; j < grp->singleq.num_rxq; j++) {
+ struct idpf_queue *q =
+ grp->singleq.rxqs[j];
+
+ writel(q->next_to_alloc, q->tail);
+ }
+ }
+ }
+}
+
+/**
+ * idpf_vport_open - Bring up a vport
+ * @vport: vport to bring up
+ * @alloc_res: allocate queue resources
+ */
+static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ int err;
+
+ if (np->state != __IDPF_VPORT_DOWN)
+ return -EBUSY;
+
+ /* we do not allow interface up just yet */
+ netif_carrier_off(vport->netdev);
+
+ if (alloc_res) {
+ err = idpf_vport_queues_alloc(vport);
+ if (err)
+ return err;
+ }
+
+ err = idpf_vport_intr_alloc(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
+ vport->vport_id, err);
+ goto queues_rel;
+ }
+
+ err = idpf_vport_queue_ids_init(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ err = idpf_vport_intr_init(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ err = idpf_rx_bufs_init_all(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ err = idpf_queue_reg_init(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_rel;
+ }
+
+ idpf_rx_init_buf_tail(vport);
+
+ err = idpf_send_config_queues_msg(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to configure queues for vport %u, %d\n",
+ vport->vport_id, err);
+ goto intr_deinit;
+ }
+
+ err = idpf_send_map_unmap_queue_vector_msg(vport, true);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to map queue vectors for vport %u: %d\n",
+ vport->vport_id, err);
+ goto intr_deinit;
+ }
+
+ err = idpf_send_enable_queues_msg(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to enable queues for vport %u: %d\n",
+ vport->vport_id, err);
+ goto unmap_queue_vectors;
+ }
+
+ err = idpf_send_enable_vport_msg(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to enable vport %u: %d\n",
+ vport->vport_id, err);
+ err = -EAGAIN;
+ goto disable_queues;
+ }
+
+ idpf_restore_features(vport);
+
+ vport_config = adapter->vport_config[vport->idx];
+ if (vport_config->user_config.rss_data.rss_lut)
+ err = idpf_config_rss(vport);
+ else
+ err = idpf_init_rss(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+ vport->vport_id, err);
+ goto disable_vport;
+ }
+
+ err = idpf_up_complete(vport);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
+ vport->vport_id, err);
+ goto deinit_rss;
+ }
+
+ return 0;
+
+deinit_rss:
+ idpf_deinit_rss(vport);
+disable_vport:
+ idpf_send_disable_vport_msg(vport);
+disable_queues:
+ idpf_send_disable_queues_msg(vport);
+unmap_queue_vectors:
+ idpf_send_map_unmap_queue_vector_msg(vport, false);
+intr_deinit:
+ idpf_vport_intr_deinit(vport);
+intr_rel:
+ idpf_vport_intr_rel(vport);
+queues_rel:
+ idpf_vport_queues_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_init_task - Delayed initialization task
+ * @work: work_struct handle to our data
+ *
+ * Init task finishes up pending work started in probe. Due to the asynchronous
+ * nature of the mailbox communication with the device, we may have to wait
+ * several milliseconds to get a response. Instead of busy polling in probe,
+ * pulling it out into a delayed work task prevents us from bogging down the
+ * whole system waiting for a response from hardware.
+ */
+void idpf_init_task(struct work_struct *work)
+{
+ struct idpf_vport_config *vport_config;
+ struct idpf_vport_max_q max_q;
+ struct idpf_adapter *adapter;
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+ u16 num_default_vports;
+ struct pci_dev *pdev;
+ bool default_vport;
+ int index, err;
+
+ adapter = container_of(work, struct idpf_adapter, init_task.work);
+
+ num_default_vports = idpf_get_default_vports(adapter);
+ if (adapter->num_alloc_vports < num_default_vports)
+ default_vport = true;
+ else
+ default_vport = false;
+
+ err = idpf_vport_alloc_max_qs(adapter, &max_q);
+ if (err)
+ goto unwind_vports;
+
+ err = idpf_send_create_vport_msg(adapter, &max_q);
+ if (err) {
+ idpf_vport_dealloc_max_qs(adapter, &max_q);
+ goto unwind_vports;
+ }
+
+ pdev = adapter->pdev;
+ vport = idpf_vport_alloc(adapter, &max_q);
+ if (!vport) {
+ err = -EFAULT;
+ dev_err(&pdev->dev, "failed to allocate vport: %d\n",
+ err);
+ idpf_vport_dealloc_max_qs(adapter, &max_q);
+ goto unwind_vports;
+ }
+
+ index = vport->idx;
+ vport_config = adapter->vport_config[index];
+
+ init_waitqueue_head(&vport->sw_marker_wq);
+ init_waitqueue_head(&vport->vchnl_wq);
+
+ mutex_init(&vport->vc_buf_lock);
+ spin_lock_init(&vport_config->mac_filter_list_lock);
+
+ INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
+
+ err = idpf_check_supported_desc_ids(vport);
+ if (err) {
+ dev_err(&pdev->dev, "failed to get required descriptor ids\n");
+ goto cfg_netdev_err;
+ }
+
+ if (idpf_cfg_netdev(vport))
+ goto cfg_netdev_err;
+
+ err = idpf_send_get_rx_ptype_msg(vport);
+ if (err)
+ goto handle_err;
+
+ /* Once state is put into DOWN, driver is ready for dev_open */
+ np = netdev_priv(vport->netdev);
+ np->state = __IDPF_VPORT_DOWN;
+ if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
+ idpf_vport_open(vport, true);
+
+ /* Re-queue the init task and return; it will run again until all the
+ * default vports have been created
+ */
+ if (adapter->num_alloc_vports < num_default_vports) {
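+ /* The delay scales with the PCI function number so that each
+ * function's init work is staggered slightly
+ */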
+ queue_delayed_work(adapter->init_wq, &adapter->init_task,
+ msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
+
+ return;
+ }
+
+ for (index = 0; index < adapter->max_vports; index++) {
+ if (adapter->netdevs[index] &&
+ !test_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[index]->flags)) {
+ register_netdev(adapter->netdevs[index]);
+ set_bit(IDPF_VPORT_REG_NETDEV,
+ adapter->vport_config[index]->flags);
+ }
+ }
+
+ /* Now that all the required vports are created, clear the reset flag
+ * unconditionally here in case we were in reset and the link was down.
+ */
+ clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ /* Start the statistics task now */
+ queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
+ msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
+
+ return;
+
+handle_err:
+ idpf_decfg_netdev(vport);
+cfg_netdev_err:
+ idpf_vport_rel(vport);
+ adapter->vports[index] = NULL;
+unwind_vports:
+ if (default_vport) {
+ for (index = 0; index < adapter->max_vports; index++) {
+ if (adapter->vports[index])
+ idpf_vport_dealloc(adapter->vports[index]);
+ }
+ }
+ clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+}
+
+/**
+ * idpf_sriov_ena - Enable or change number of VFs
+ * @adapter: private data struct
+ * @num_vfs: number of VFs to allocate
+ */
+static int idpf_sriov_ena(struct idpf_adapter *adapter, int num_vfs)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err;
+
+ err = idpf_send_set_sriov_vfs_msg(adapter, num_vfs);
+ if (err) {
+ dev_err(dev, "Failed to allocate VFs: %d\n", err);
+
+ return err;
+ }
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err) {
+ idpf_send_set_sriov_vfs_msg(adapter, 0);
+ dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
+
+ return err;
+ }
+
+ adapter->num_vfs = num_vfs;
+
+ return num_vfs;
+}
+
+/**
+ * idpf_sriov_configure - Configure the requested VFs
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of vfs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SRIOV)) {
+ dev_info(&pdev->dev, "SR-IOV is not supported on this device\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs)
+ return idpf_sriov_ena(adapter, num_vfs);
+
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs\n");
+
+ return -EBUSY;
+ }
+
+ pci_disable_sriov(adapter->pdev);
+ idpf_send_set_sriov_vfs_msg(adapter, 0);
+ adapter->num_vfs = 0;
+
+ return 0;
+}
+
+/**
+ * idpf_deinit_task - Device deinit routine
+ * @adapter: Driver specific private structure
+ *
+ * Extended remove logic which will be used for
+ * hard reset as well
+ */
+void idpf_deinit_task(struct idpf_adapter *adapter)
+{
+ unsigned int i;
+
+ /* Wait until the init_task is done, else this thread might release
+ * the resources first and the other thread might end up in a bad state
+ */
+ cancel_delayed_work_sync(&adapter->init_task);
+
+ if (!adapter->vports)
+ return;
+
+ cancel_delayed_work_sync(&adapter->stats_task);
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ if (adapter->vports[i])
+ idpf_vport_dealloc(adapter->vports[i]);
+ }
+}
+
+/**
+ * idpf_check_reset_complete - check that reset is complete
+ * @hw: pointer to hw struct
+ * @reset_reg: struct with reset registers
+ *
+ * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
+ **/
+static int idpf_check_reset_complete(struct idpf_hw *hw,
+ struct idpf_reset_reg *reset_reg)
+{
+ struct idpf_adapter *adapter = hw->back;
+ int i;
+
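+ /* Poll the reset status register for up to roughly 10-20 seconds
+ * (2000 iterations of a 5-10 ms sleep) before giving up
+ */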
+ for (i = 0; i < 2000; i++) {
+ u32 reg_val = readl(reset_reg->rstat);
+
+ /* 0xFFFFFFFF might be read if the other side hasn't cleared the
+ * register for us yet and 0xFFFFFFFF is not a valid value for
+ * the register, so treat that as invalid.
+ */
+ if (reg_val != 0xFFFFFFFF && (reg_val & reset_reg->rstat_m))
+ return 0;
+
+ usleep_range(5000, 10000);
+ }
+
+ dev_warn(&adapter->pdev->dev, "Device reset timeout!\n");
+ /* Clear the reset flag unconditionally here since the reset
+ * technically isn't in progress anymore from the driver's perspective
+ */
+ clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+
+ return -EBUSY;
+}
+
+/**
+ * idpf_set_vport_state - Set the vport state to be after the reset
+ * @adapter: Driver specific private structure
+ */
+static void idpf_set_vport_state(struct idpf_adapter *adapter)
+{
+ u16 i;
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ struct idpf_netdev_priv *np;
+
+ if (!adapter->netdevs[i])
+ continue;
+
+ np = netdev_priv(adapter->netdevs[i]);
+ if (np->state == __IDPF_VPORT_UP)
+ set_bit(IDPF_VPORT_UP_REQUESTED,
+ adapter->vport_config[i]->flags);
+ }
+}
+
+/**
+ * idpf_init_hard_reset - Initiate a hardware reset
+ * @adapter: Driver specific private structure
+ *
+ * Deallocate the vports and all the resources associated with them and
+ * reallocate. Also reinitialize the mailbox. Return 0 on success,
+ * negative on failure.
+ */
+static int idpf_init_hard_reset(struct idpf_adapter *adapter)
+{
+ struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
+ struct device *dev = &adapter->pdev->dev;
+ struct net_device *netdev;
+ int err;
+ u16 i;
+
+ mutex_lock(&adapter->vport_ctrl_lock);
+
+ dev_info(dev, "Device HW Reset initiated\n");
+
+ /* Avoid TX hangs on reset */
+ for (i = 0; i < adapter->max_vports; i++) {
+ netdev = adapter->netdevs[i];
+ if (!netdev)
+ continue;
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+ }
+
+ /* Prepare for reset */
+ if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
+ } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
+ bool is_reset = idpf_is_reset_detected(adapter);
+
+ idpf_set_vport_state(adapter);
+ idpf_vc_core_deinit(adapter);
+ if (!is_reset)
+ reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
+ idpf_deinit_dflt_mbx(adapter);
+ } else {
+ dev_err(dev, "Unhandled hard reset cause\n");
+ err = -EBADRQC;
+ goto unlock_mutex;
+ }
+
+ /* Wait for reset to complete */
+ err = idpf_check_reset_complete(&adapter->hw, &adapter->reset_reg);
+ if (err) {
+ dev_err(dev, "The driver was unable to contact the device's firmware. Check that the FW is running. Driver state= 0x%x\n",
+ adapter->state);
+ goto unlock_mutex;
+ }
+
+ /* Reset is complete and so start building the driver resources again */
+ err = idpf_init_dflt_mbx(adapter);
+ if (err) {
+ dev_err(dev, "Failed to initialize default mailbox: %d\n", err);
+ goto unlock_mutex;
+ }
+
+ /* Initialize the state machine, also allocate memory and request
+ * resources
+ */
+ err = idpf_vc_core_init(adapter);
+ if (err) {
+ idpf_deinit_dflt_mbx(adapter);
+ goto unlock_mutex;
+ }
+
+ /* Wait till all the vports are initialized to release the reset lock,
+ * else user space callbacks may access uninitialized vports
+ */
+ while (test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+ msleep(100);
+
+unlock_mutex:
+ mutex_unlock(&adapter->vport_ctrl_lock);
+
+ return err;
+}
+
+/**
+ * idpf_vc_event_task - Handle virtchannel event logic
+ * @work: work queue struct
+ */
+void idpf_vc_event_task(struct work_struct *work)
+{
+ struct idpf_adapter *adapter;
+
+ adapter = container_of(work, struct idpf_adapter, vc_event_task.work);
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ return;
+
+ if (test_bit(IDPF_HR_FUNC_RESET, adapter->flags) ||
+ test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+ set_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+ idpf_init_hard_reset(adapter);
+ }
+}
+
+/**
+ * idpf_initiate_soft_reset - Initiate a software reset
+ * @vport: virtual port data struct
+ * @reset_cause: reason for the soft reset
+ *
+ * Soft reset only reallocs vport queue resources. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_initiate_soft_reset(struct idpf_vport *vport,
+ enum idpf_vport_reset_cause reset_cause)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ enum idpf_vport_state current_state = np->state;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport *new_vport;
+ int err, i;
+
+ /* If the system is low on memory, we can end up in a bad state if we
+ * free all the memory for queue resources and try to allocate them
+ * again. Instead, we can pre-allocate the new resources before doing
+ * anything and bail if the alloc fails.
+ *
+ * Make a clone of the existing vport to mimic its current
+ * configuration, then modify the new structure with any requested
+ * changes. Once the allocation of the new resources is done, stop the
+ * existing vport and copy the configuration to the main vport. If an
+ * error occurred, the existing vport will be untouched.
+ */
+ new_vport = kzalloc(sizeof(*vport), GFP_KERNEL);
+ if (!new_vport)
+ return -ENOMEM;
+
+ /* This purposely avoids copying the end of the struct because it
+ * contains wait_queues and mutexes and other stuff we don't want to
+ * mess with. Nothing below should use those variables from new_vport
+ * and should instead always refer to them in vport if they need to.
+ */
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+
+ /* Adjust resource parameters prior to reallocating resources */
+ switch (reset_cause) {
+ case IDPF_SR_Q_CHANGE:
+ err = idpf_vport_adjust_qs(new_vport);
+ if (err)
+ goto free_vport;
+ break;
+ case IDPF_SR_Q_DESC_CHANGE:
+ /* Update queue parameters before allocating resources */
+ idpf_vport_calc_num_q_desc(new_vport);
+ break;
+ case IDPF_SR_MTU_CHANGE:
+ case IDPF_SR_RSC_CHANGE:
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unhandled soft reset cause\n");
+ err = -EINVAL;
+ goto free_vport;
+ }
+
+ err = idpf_vport_queues_alloc(new_vport);
+ if (err)
+ goto free_vport;
+ if (current_state <= __IDPF_VPORT_DOWN) {
+ idpf_send_delete_queues_msg(vport);
+ } else {
+ set_bit(IDPF_VPORT_DEL_QUEUES, vport->flags);
+ idpf_vport_stop(vport);
+ }
+
+ idpf_deinit_rss(vport);
+ /* We're passing in vport here because we need its wait_queue to
+ * send a message, and it should be getting all the vport config
+ * data out of the adapter. Be careful not to add code to add_queues
+ * that changes the vport config within vport itself, as it will be
+ * wiped by a memcpy later.
+ */
+ err = idpf_send_add_queues_msg(vport, new_vport->num_txq,
+ new_vport->num_complq,
+ new_vport->num_rxq,
+ new_vport->num_bufq);
+ if (err)
+ goto err_reset;
+
+ /* Same comment as above regarding avoiding copying the wait_queues and
+ * mutexes applies here. We do not want to mess with those if possible.
+ */
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+
+ /* Since idpf_vport_queues_alloc was called with new_vport, the queue
+ * back pointers are currently pointing to the local new_vport. Reset
+ * the back pointers to the original vport here.
+ */
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ int j;
+
+ tx_qgrp->vport = vport;
+ for (j = 0; j < tx_qgrp->num_txq; j++)
+ tx_qgrp->txqs[j]->vport = vport;
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ tx_qgrp->complq->vport = vport;
+ }
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ struct idpf_queue *q;
+ u16 num_rxq;
+ int j;
+
+ rx_qgrp->vport = vport;
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
+ rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ q->vport = vport;
+ }
+ }
+
+ if (reset_cause == IDPF_SR_Q_CHANGE)
+ idpf_vport_alloc_vec_indexes(vport);
+
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto err_reset;
+
+ if (current_state == __IDPF_VPORT_UP)
+ err = idpf_vport_open(vport, false);
+
+ kfree(new_vport);
+
+ return err;
+
+err_reset:
+ idpf_vport_queues_rel(new_vport);
+free_vport:
+ kfree(new_vport);
+
+ return err;
+}
+
+/**
+ * idpf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
+ * meaning we cannot sleep in this context. Due to this, we have to add the
+ * filter and send the virtchnl message asynchronously without waiting for the
+ * response from the other side. We won't know whether or not the operation
+ * actually succeeded until we get the message back. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_addr_sync(struct net_device *netdev, const u8 *addr)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ return idpf_add_mac_filter(np->vport, np, addr, true);
+}
+
+/**
+ * idpf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to remove
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode. Kernel takes addr_list_lock spinlock
+ * meaning we cannot sleep in this context. Due to this we have to delete the
+ * filter and send the virtchnl message asynchronously without waiting for the
+ * return from the other side. We won't know whether or not the operation
+ * actually succeeded until we get the message back. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the vport's MAC filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ idpf_del_mac_filter(np->vport, np, addr, true);
+
+ return 0;
+}
+
+/**
+ * idpf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ *
+ * Stack takes addr_list_lock spinlock before calling our .set_rx_mode. We
+ * cannot sleep in this context.
+ */
+static void idpf_set_rx_mode(struct net_device *netdev)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_user_config_data *config_data;
+ struct idpf_adapter *adapter;
+ bool changed = false;
+ struct device *dev;
+ int err;
+
+ adapter = np->adapter;
+ dev = &adapter->pdev->dev;
+
+ if (idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_MACFILTER)) {
+ __dev_uc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
+ __dev_mc_sync(netdev, idpf_addr_sync, idpf_addr_unsync);
+ }
+
+ if (!idpf_is_cap_ena(adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_PROMISC))
+ return;
+
+ config_data = &adapter->vport_config[np->vport_idx]->user_config;
+ /* IFF_PROMISC enables both unicast and multicast promiscuous,
+ * while IFF_ALLMULTI only enables multicast such that:
+ *
+ * promisc + allmulti = unicast | multicast
+ * promisc + !allmulti = unicast | multicast
+ * !promisc + allmulti = multicast
+ */
+ if ((netdev->flags & IFF_PROMISC) &&
+ !test_and_set_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
+ changed = true;
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ if (!test_and_set_bit(__IDPF_PROMISC_MC, adapter->flags))
+ dev_info(dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!(netdev->flags & IFF_PROMISC) &&
+ test_and_clear_bit(__IDPF_PROMISC_UC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Leaving promiscuous mode\n");
+ }
+
+ if (netdev->flags & IFF_ALLMULTI &&
+ !test_and_set_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!(netdev->flags & (IFF_ALLMULTI | IFF_PROMISC)) &&
+ test_and_clear_bit(__IDPF_PROMISC_MC, config_data->user_flags)) {
+ changed = true;
+ dev_info(dev, "Leaving multicast promiscuous mode\n");
+ }
+
+ if (!changed)
+ return;
+
+ err = idpf_set_promiscuous(adapter, config_data, np->vport_id);
+ if (err)
+ dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
+}
+
+/**
+ * idpf_vport_manage_rss_lut - disable/enable RSS
+ * @vport: the vport being changed
+ *
+ * In the event of a disable request for RSS, this function will zero out the
+ * RSS LUT, while in the event of an enable request it will restore the
+ * previously cached (default or user configured) LUT.
+ */
+static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
+{
+ bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
+ struct idpf_rss_data *rss_data;
+ u16 idx = vport->idx;
+ int lut_size;
+
+ rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+
+ if (ena) {
+ /* This will contain the default or user configured LUT */
+ memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
+ } else {
+ /* Save a copy of the current LUT to be restored later if
+ * requested.
+ */
+ memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
+
+ /* Zero out the current LUT to disable */
+ memset(rss_data->rss_lut, 0, lut_size);
+ }
+
+ return idpf_config_rss(vport);
+}
+
+/**
+ * idpf_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ */
+static int idpf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct idpf_adapter *adapter;
+ struct idpf_vport *vport;
+ int err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ adapter = vport->adapter;
+
+ if (idpf_is_reset_in_prog(adapter)) {
+ dev_err(&adapter->pdev->dev, "Device is resetting, changing netdev features temporarily unavailable.\n");
+ err = -EBUSY;
+ goto unlock_mutex;
+ }
+
+ if (changed & NETIF_F_RXHASH) {
+ netdev->features ^= NETIF_F_RXHASH;
+ err = idpf_vport_manage_rss_lut(vport);
+ if (err)
+ goto unlock_mutex;
+ }
+
+ if (changed & NETIF_F_GRO_HW) {
+ netdev->features ^= NETIF_F_GRO_HW;
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_RSC_CHANGE);
+ if (err)
+ goto unlock_mutex;
+ }
+
+ if (changed & NETIF_F_LOOPBACK) {
+ netdev->features ^= NETIF_F_LOOPBACK;
+ err = idpf_send_ena_dis_loopback_msg(vport);
+ }
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_open - Called when a network interface becomes active
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the netdev watchdog is enabled,
+ * and the stack is notified that the interface is ready.
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int idpf_open(struct net_device *netdev)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ err = idpf_vport_open(vport, true);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_change_mtu - NDO callback to change the MTU
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct idpf_vport *vport;
+ int err;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ netdev->mtu = new_mtu;
+
+ err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
+
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_features_check - Validate packet conforms to limits
+ * @skb: skb buffer
+ * @netdev: This port's netdev
+ * @features: Offload features that the stack believes apply
+ */
+static netdev_features_t idpf_features_check(struct sk_buff *skb,
+ struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_adapter *adapter = vport->adapter;
+ size_t len;
+
+ /* No point in doing any of this if neither checksum nor GSO are
+ * being requested for this frame. We can rule out both by just
+ * checking for CHECKSUM_PARTIAL
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ /* We cannot support GSO if the MSS is going to be less than
+ * 88 bytes. If it is then we need to drop support for GSO.
+ */
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_size < IDPF_TX_TSO_MIN_MSS))
+ features &= ~NETIF_F_GSO_MASK;
+
+ /* Ensure MACLEN is <= 126 bytes (63 words) and not an odd size */
+ len = skb_network_offset(skb);
+ if (unlikely(len & ~(126)))
+ goto unsupported;
+
+ len = skb_network_header_len(skb);
+ if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ goto unsupported;
+
+ if (!skb->encapsulation)
+ return features;
+
+ /* L4TUNLEN can support 127 words */
+ len = skb_inner_network_header(skb) - skb_transport_header(skb);
+ if (unlikely(len & ~(127 * 2)))
+ goto unsupported;
+
+ /* IPLEN can support at most 127 dwords */
+ len = skb_inner_network_header_len(skb);
+ if (unlikely(len > idpf_get_max_tx_hdr_size(adapter)))
+ goto unsupported;
+
+ /* No need to validate L4LEN as TCP is the only protocol with a
+ * flexible value and we support all possible values supported
+ * by TCP, which is at most 15 dwords
+ */
+
+ return features;
+
+unsupported:
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+/**
+ * idpf_set_mac - NDO callback to set port mac address
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int idpf_set_mac(struct net_device *netdev, void *p)
+{
+ struct idpf_netdev_priv *np = netdev_priv(netdev);
+ struct idpf_vport_config *vport_config;
+ struct sockaddr *addr = p;
+ struct idpf_vport *vport;
+ int err = 0;
+
+ idpf_vport_ctrl_lock(netdev);
+ vport = idpf_netdev_to_vport(netdev);
+
+ if (!idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_MACFILTER)) {
+ dev_info(&vport->adapter->pdev->dev, "Setting MAC address is not supported\n");
+ err = -EOPNOTSUPP;
+ goto unlock_mutex;
+ }
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ dev_info(&vport->adapter->pdev->dev, "Invalid MAC address: %pM\n",
+ addr->sa_data);
+ err = -EADDRNOTAVAIL;
+ goto unlock_mutex;
+ }
+
+ if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
+ goto unlock_mutex;
+
+ vport_config = vport->adapter->vport_config[vport->idx];
+ err = idpf_add_mac_filter(vport, np, addr->sa_data, false);
+ if (err) {
+ __idpf_del_mac_filter(vport_config, addr->sa_data);
+ goto unlock_mutex;
+ }
+
+ if (is_valid_ether_addr(vport->default_mac_addr))
+ idpf_del_mac_filter(vport, np, vport->default_mac_addr, false);
+
+ ether_addr_copy(vport->default_mac_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+unlock_mutex:
+ idpf_vport_ctrl_unlock(netdev);
+
+ return err;
+}
+
+/**
+ * idpf_alloc_dma_mem - Allocate dma memory
+ * @hw: pointer to hw struct
+ * @mem: pointer to dma_mem struct
+ * @size: size of the memory to allocate
+ */
+void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
+{
+ struct idpf_adapter *adapter = hw->back;
+ size_t sz = ALIGN(size, 4096);
+
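+ /* The allocation size is rounded up to a 4K multiple above; the
+ * caller sees the padded size via mem->size
+ */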
+ mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &mem->pa, GFP_KERNEL);
+ mem->size = sz;
+
+ return mem->va;
+}
+
+/**
+ * idpf_free_dma_mem - Free the allocated dma memory
+ * @hw: pointer to hw struct
+ * @mem: pointer to dma_mem struct
+ */
+void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
+{
+ struct idpf_adapter *adapter = hw->back;
+
+ dma_free_coherent(&adapter->pdev->dev, mem->size,
+ mem->va, mem->pa);
+ mem->size = 0;
+ mem->va = NULL;
+ mem->pa = 0;
+}
+
+static const struct net_device_ops idpf_netdev_ops_splitq = {
+ .ndo_open = idpf_open,
+ .ndo_stop = idpf_stop,
+ .ndo_start_xmit = idpf_tx_splitq_start,
+ .ndo_features_check = idpf_features_check,
+ .ndo_set_rx_mode = idpf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = idpf_set_mac,
+ .ndo_change_mtu = idpf_change_mtu,
+ .ndo_get_stats64 = idpf_get_stats64,
+ .ndo_set_features = idpf_set_features,
+ .ndo_tx_timeout = idpf_tx_timeout,
+};
+
+static const struct net_device_ops idpf_netdev_ops_singleq = {
+ .ndo_open = idpf_open,
+ .ndo_stop = idpf_stop,
+ .ndo_start_xmit = idpf_tx_singleq_start,
+ .ndo_features_check = idpf_features_check,
+ .ndo_set_rx_mode = idpf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = idpf_set_mac,
+ .ndo_change_mtu = idpf_change_mtu,
+ .ndo_get_stats64 = idpf_get_stats64,
+ .ndo_set_features = idpf_set_features,
+ .ndo_tx_timeout = idpf_tx_timeout,
+};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
new file mode 100644
index 000000000000..e1febc74cefd
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_devids.h"
+
+#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
+
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+
+/**
+ * idpf_remove - Device removal routine
+ * @pdev: PCI device information struct
+ */
+static void idpf_remove(struct pci_dev *pdev)
+{
+ struct idpf_adapter *adapter = pci_get_drvdata(pdev);
+ int i;
+
+ set_bit(IDPF_REMOVE_IN_PROG, adapter->flags);
+
+ /* Wait until vc_event_task is done before considering whether any hard
+ * reset is in progress; otherwise we may release the resources while the
+ * thread doing the hard reset continues down the init path and ends up
+ * in a bad state.
+ */
+ cancel_delayed_work_sync(&adapter->vc_event_task);
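+ /* Release any VFs that are still allocated before tearing down the
+ * rest of the device
+ */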
+ if (adapter->num_vfs)
+ idpf_sriov_configure(pdev, 0);
+
+ idpf_vc_core_deinit(adapter);
+ /* Be a good citizen and leave the device clean on exit */
+ adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
+ idpf_deinit_dflt_mbx(adapter);
+
+ if (!adapter->netdevs)
+ goto destroy_wqs;
+
+ /* There are some cases where it's possible to still have netdevs
+ * registered with the stack at this point, e.g. if the driver detected
+ * a HW reset and rmmod is called before it fully recovers. Unregister
+ * any stale netdevs here.
+ */
+ for (i = 0; i < adapter->max_vports; i++) {
+ if (!adapter->netdevs[i])
+ continue;
+ if (adapter->netdevs[i]->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(adapter->netdevs[i]);
+ free_netdev(adapter->netdevs[i]);
+ adapter->netdevs[i] = NULL;
+ }
+
+destroy_wqs:
+ destroy_workqueue(adapter->init_wq);
+ destroy_workqueue(adapter->serv_wq);
+ destroy_workqueue(adapter->mbx_wq);
+ destroy_workqueue(adapter->stats_wq);
+ destroy_workqueue(adapter->vc_event_wq);
+
+ for (i = 0; i < adapter->max_vports; i++) {
+ kfree(adapter->vport_config[i]);
+ adapter->vport_config[i] = NULL;
+ }
+ kfree(adapter->vport_config);
+ adapter->vport_config = NULL;
+ kfree(adapter->netdevs);
+ adapter->netdevs = NULL;
+
+ mutex_destroy(&adapter->vport_ctrl_lock);
+ mutex_destroy(&adapter->vector_lock);
+ mutex_destroy(&adapter->queue_lock);
+ mutex_destroy(&adapter->vc_buf_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ kfree(adapter);
+}
+
+/**
+ * idpf_shutdown - PCI callback for shutting down device
+ * @pdev: PCI device information struct
+ */
+static void idpf_shutdown(struct pci_dev *pdev)
+{
+ idpf_remove(pdev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+/**
+ * idpf_cfg_hw - Initialize HW struct
+ * @adapter: adapter to setup hw struct for
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_cfg_hw(struct idpf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct idpf_hw *hw = &adapter->hw;
+
+ hw->hw_addr = pcim_iomap_table(pdev)[0];
+ if (!hw->hw_addr) {
+ pci_err(pdev, "failed to allocate PCI iomap table\n");
+
+ return -ENOMEM;
+ }
+
+ hw->back = adapter;
+
+ return 0;
+}
+
+/**
+ * idpf_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in idpf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct idpf_adapter *adapter;
+ int err;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return -ENOMEM;
+
+ adapter->req_tx_splitq = true;
+ adapter->req_rx_splitq = true;
+
+ switch (ent->device) {
+ case IDPF_DEV_ID_PF:
+ idpf_dev_ops_init(adapter);
+ break;
+ case IDPF_DEV_ID_VF:
+ idpf_vf_dev_ops_init(adapter);
+ adapter->crc_enable = true;
+ break;
+ default:
+ err = -ENODEV;
+ dev_err(&pdev->dev, "Unexpected dev ID 0x%x in idpf probe\n",
+ ent->device);
+ goto err_free;
+ }
+
+ adapter->pdev = pdev;
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto err_free;
+
+ err = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (err) {
+ pci_err(pdev, "pcim_iomap_regions failed %pe\n", ERR_PTR(err));
+
+ goto err_free;
+ }
+
+ /* set up for high or low dma */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err) {
+ pci_err(pdev, "DMA configuration failed: %pe\n", ERR_PTR(err));
+
+ goto err_free;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, adapter);
+
+ adapter->init_wq = alloc_workqueue("%s-%s-init", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->init_wq) {
+ dev_err(dev, "Failed to allocate init workqueue\n");
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ adapter->serv_wq = alloc_workqueue("%s-%s-service", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->serv_wq) {
+ dev_err(dev, "Failed to allocate service workqueue\n");
+ err = -ENOMEM;
+ goto err_serv_wq_alloc;
+ }
+
+ adapter->mbx_wq = alloc_workqueue("%s-%s-mbx", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->mbx_wq) {
+ dev_err(dev, "Failed to allocate mailbox workqueue\n");
+ err = -ENOMEM;
+ goto err_mbx_wq_alloc;
+ }
+
+ adapter->stats_wq = alloc_workqueue("%s-%s-stats", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->stats_wq) {
+ dev_err(dev, "Failed to allocate workqueue\n");
+ err = -ENOMEM;
+ goto err_stats_wq_alloc;
+ }
+
+ adapter->vc_event_wq = alloc_workqueue("%s-%s-vc_event", 0, 0,
+ dev_driver_string(dev),
+ dev_name(dev));
+ if (!adapter->vc_event_wq) {
+ dev_err(dev, "Failed to allocate virtchnl event workqueue\n");
+ err = -ENOMEM;
+ goto err_vc_event_wq_alloc;
+ }
+
+ /* setup msglvl */
+ adapter->msg_enable = netif_msg_init(-1, IDPF_AVAIL_NETIF_M);
+
+ err = idpf_cfg_hw(adapter);
+ if (err) {
+ dev_err(dev, "Failed to configure HW structure for adapter: %d\n",
+ err);
+ goto err_cfg_hw;
+ }
+
+ mutex_init(&adapter->vport_ctrl_lock);
+ mutex_init(&adapter->vector_lock);
+ mutex_init(&adapter->queue_lock);
+ mutex_init(&adapter->vc_buf_lock);
+
+ init_waitqueue_head(&adapter->vchnl_wq);
+
+ INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
+ INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
+ INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
+ INIT_DELAYED_WORK(&adapter->stats_task, idpf_statistics_task);
+ INIT_DELAYED_WORK(&adapter->vc_event_task, idpf_vc_event_task);
+
+ adapter->dev_ops.reg_ops.reset_reg_init(adapter);
+ set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
+ msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
+
+ return 0;
+
+err_cfg_hw:
+ destroy_workqueue(adapter->vc_event_wq);
+err_vc_event_wq_alloc:
+ destroy_workqueue(adapter->stats_wq);
+err_stats_wq_alloc:
+ destroy_workqueue(adapter->mbx_wq);
+err_mbx_wq_alloc:
+ destroy_workqueue(adapter->serv_wq);
+err_serv_wq_alloc:
+ destroy_workqueue(adapter->init_wq);
+err_free:
+ kfree(adapter);
+ return err;
+}
+
+/* idpf_pci_tbl - PCI Dev idpf ID Table
+ */
+static const struct pci_device_id idpf_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, IDPF_DEV_ID_PF)},
+ { PCI_VDEVICE(INTEL, IDPF_DEV_ID_VF)},
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(pci, idpf_pci_tbl);
+
+static struct pci_driver idpf_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = idpf_pci_tbl,
+ .probe = idpf_probe,
+ .sriov_configure = idpf_sriov_configure,
+ .remove = idpf_remove,
+ .shutdown = idpf_shutdown,
+};
+module_pci_driver(idpf_driver);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_mem.h b/drivers/net/ethernet/intel/idpf/idpf_mem.h
new file mode 100644
index 000000000000..b21a04fccf0f
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_mem.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_MEM_H_
+#define _IDPF_MEM_H_
+
+#include <linux/io.h>
+
+struct idpf_dma_mem {
+ void *va;
+ dma_addr_t pa;
+ size_t size;
+};
+
+#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+#define rd32(a, reg) readl((a)->hw_addr + (reg))
+#define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg)))
+#define rd64(a, reg) readq((a)->hw_addr + (reg))
+
+#endif /* _IDPF_MEM_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
new file mode 100644
index 000000000000..81288a17da2a
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -0,0 +1,1183 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_tx_singleq_csum - Enable tx checksum offloads
+ * @skb: pointer to skb
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns 1 if checksum offload parameters were set up, 0 if no checksum
+ * offload is needed, or a negative error if the offload cannot be performed.
+ */
+static int idpf_tx_singleq_csum(struct sk_buff *skb,
+ struct idpf_tx_offload_params *off)
+{
+ u32 l4_len, l3_len, l2_len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 offset, cmd = 0;
+ u8 l4_proto = 0;
+ __be16 frag_off;
+ bool is_tso;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute outer L2 header size */
+ l2_len = ip.hdr - skb->data;
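+ /* The MACLEN descriptor field is in units of 2-byte words, hence the
+ * divide by two below
+ */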
+ offset = FIELD_PREP(0x3F << IDPF_TX_DESC_LEN_MACLEN_S, l2_len / 2);
+ is_tso = !!(off->tx_flags & IDPF_TX_FLAGS_TSO);
+ if (skb->encapsulation) {
+ u32 tunnel = 0;
+
+ /* define outer network header type */
+ if (off->tx_flags & IDPF_TX_FLAGS_IPV4) {
+ /* The stack computes the IP header already, the only
+ * time we need the hardware to recompute it is in the
+ * case of TSO.
+ */
+ tunnel |= is_tso ?
+ IDPF_TX_CTX_EXT_IP_IPV4 :
+ IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+
+ l4_proto = ip.v4->protocol;
+ } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) {
+ tunnel |= IDPF_TX_CTX_EXT_IP_IPV6;
+
+ l4_proto = ip.v6->nexthdr;
+ if (ipv6_ext_hdr(l4_proto))
+ ipv6_skip_exthdr(skb, skb_network_offset(skb) +
+ sizeof(*ip.v6),
+ &l4_proto, &frag_off);
+ }
+
+ /* define outer transport */
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ tunnel |= IDPF_TXD_CTX_UDP_TUNNELING;
+ break;
+ case IPPROTO_GRE:
+ tunnel |= IDPF_TXD_CTX_GRE_TUNNELING;
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ l4.hdr = skb_inner_network_header(skb);
+ break;
+ default:
+ if (is_tso)
+ return -1;
+
+ skb_checksum_help(skb);
+
+ return 0;
+ }
+ off->tx_flags |= IDPF_TX_FLAGS_TUNNEL;
+
+ /* compute outer L3 header size */
+ tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_EXT_IPLEN_M,
+ (l4.hdr - ip.hdr) / 4);
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
+ /* compute tunnel header size */
+ tunnel |= FIELD_PREP(IDPF_TXD_CTX_QW0_TUNN_NATLEN_M,
+ (ip.hdr - l4.hdr) / 2);
+
+ /* indicate if we need to offload outer UDP header */
+ if (is_tso &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ tunnel |= IDPF_TXD_CTX_QW0_TUNN_L4T_CS_M;
+
+ /* record tunnel offload values */
+ off->cd_tunneling |= tunnel;
+
+ /* switch L4 header pointer from outer to inner */
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = 0;
+
+ /* reset type as we transition from outer to inner headers */
+ off->tx_flags &= ~(IDPF_TX_FLAGS_IPV4 | IDPF_TX_FLAGS_IPV6);
+ if (ip.v4->version == 4)
+ off->tx_flags |= IDPF_TX_FLAGS_IPV4;
+ if (ip.v6->version == 6)
+ off->tx_flags |= IDPF_TX_FLAGS_IPV6;
+ }
+
+ /* Enable IP checksum offloads */
+ if (off->tx_flags & IDPF_TX_FLAGS_IPV4) {
+ l4_proto = ip.v4->protocol;
+ /* See comment above regarding need for HW to recompute IP
+ * header checksum in the case of TSO.
+ */
+ if (is_tso)
+ cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+ else
+ cmd |= IDPF_TX_DESC_CMD_IIPT_IPV4;
+
+ } else if (off->tx_flags & IDPF_TX_FLAGS_IPV6) {
+ cmd |= IDPF_TX_DESC_CMD_IIPT_IPV6;
+ l4_proto = ip.v6->nexthdr;
+ if (ipv6_ext_hdr(l4_proto))
+ ipv6_skip_exthdr(skb, skb_network_offset(skb) +
+ sizeof(*ip.v6), &l4_proto,
+ &frag_off);
+ } else {
+ return -1;
+ }
+
+ /* compute inner L3 header size */
+ l3_len = l4.hdr - ip.hdr;
+ offset |= (l3_len / 4) << IDPF_TX_DESC_LEN_IPLEN_S;
+
+ /* Enable L4 checksum offloads */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ /* enable checksum offloads */
+ cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_TCP;
+ l4_len = l4.tcp->doff;
+ break;
+ case IPPROTO_UDP:
+ /* enable UDP checksum offload */
+ cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_UDP;
+ l4_len = sizeof(struct udphdr) >> 2;
+ break;
+ case IPPROTO_SCTP:
+ /* enable SCTP checksum offload */
+ cmd |= IDPF_TX_DESC_CMD_L4T_EOFT_SCTP;
+ l4_len = sizeof(struct sctphdr) >> 2;
+ break;
+ default:
+ if (is_tso)
+ return -1;
+
+ skb_checksum_help(skb);
+
+ return 0;
+ }
+
+ offset |= l4_len << IDPF_TX_DESC_LEN_L4_LEN_S;
+ off->td_cmd |= cmd;
+ off->hdr_offsets |= offset;
+
+ return 1;
+}
+
+/**
+ * idpf_tx_singleq_map - Build the Tx base descriptor
+ * @tx_q: queue to send buffer on
+ * @first: first buffer info buffer to use
+ * @offloads: pointer to struct that holds offload parameters
+ *
+ * This function loops over the skb data pointed to by *first,
+ * maps each region for DMA, and programs the address and length
+ * into the transmit base mode descriptors.
+ */
+static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
+ struct idpf_tx_buf *first,
+ struct idpf_tx_offload_params *offloads)
+{
+ u32 offsets = offloads->hdr_offsets;
+ struct idpf_tx_buf *tx_buf = first;
+ struct idpf_base_tx_desc *tx_desc;
+ struct sk_buff *skb = first->skb;
+ u64 td_cmd = offloads->td_cmd;
+ unsigned int data_len, size;
+ u16 i = tx_q->next_to_use;
+ struct netdev_queue *nq;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u64 td_tag = 0;
+
+ data_len = skb->data_len;
+ size = skb_headlen(skb);
+
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+
+ dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
+
+ /* write each descriptor with CRC bit */
+ if (tx_q->vport->crc_enable)
+ td_cmd |= IDPF_TX_DESC_CMD_ICRC;
+
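+ /* The mapped skb head is consumed first; each pass of the loop below
+ * writes one descriptor per chunk, splitting any mapping larger than
+ * the hardware limit, and then maps the next fragment.
+ */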
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+
+ if (dma_mapping_error(tx_q->dev, dma))
+ return idpf_tx_dma_map_error(tx_q, skb, first, i);
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ /* size the first chunk of this mapping so it ends on a max read
+ * request size boundary; -dma & (size - 1) is the distance from dma
+ * to the next such boundary
+ */
+ max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+
+ /* account for data chunks larger than the hardware
+ * can handle
+ */
+ while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
+ tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd,
+ offsets,
+ max_data,
+ td_tag);
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ i = 0;
+ }
+
+ dma += max_data;
+ size -= max_data;
+
+ max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ }
+
+ if (!data_len)
+ break;
+
+ tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
+ size, td_tag);
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ i = 0;
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buf = &tx_q->tx_buf[i];
+ }
+
+ skb_tx_timestamp(first->skb);
+
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= (u64)(IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS);
+
+ tx_desc->qw1 = idpf_tx_singleq_build_ctob(td_cmd, offsets,
+ size, td_tag);
+
+ IDPF_SINGLEQ_BUMP_RING_IDX(tx_q, i);
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ netdev_tx_sent_queue(nq, first->bytecount);
+
+ idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
+}
+
+/**
+ * idpf_tx_singleq_get_ctx_desc - grab next desc and update buffer ring
+ * @txq: queue to put context descriptor on
+ *
+ * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
+ * ring entry to reflect that this index is a context descriptor
+ */
+static struct idpf_base_tx_ctx_desc *
+idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
+{
+ struct idpf_base_tx_ctx_desc *ctx_desc;
+ int ntu = txq->next_to_use;
+
+ memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
+ txq->tx_buf[ntu].ctx_entry = true;
+
+ ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+
+ IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
+ txq->next_to_use = ntu;
+
+ return ctx_desc;
+}
+
+/**
+ * idpf_tx_singleq_build_ctx_desc - populate context descriptor
+ * @txq: queue to send buffer on
+ * @offload: offload parameter structure
+ **/
+static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
+ struct idpf_tx_offload_params *offload)
+{
+ struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
+ u64 qw1 = (u64)IDPF_TX_DESC_DTYPE_CTX;
+
+ if (offload->tso_segs) {
+ qw1 |= IDPF_TX_CTX_DESC_TSO << IDPF_TXD_CTX_QW1_CMD_S;
+ qw1 |= ((u64)offload->tso_len << IDPF_TXD_CTX_QW1_TSO_LEN_S) &
+ IDPF_TXD_CTX_QW1_TSO_LEN_M;
+ qw1 |= ((u64)offload->mss << IDPF_TXD_CTX_QW1_MSS_S) &
+ IDPF_TXD_CTX_QW1_MSS_M;
+
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.tx.lso_pkts);
+ u64_stats_update_end(&txq->stats_sync);
+ }
+
+ desc->qw0.tunneling_params = cpu_to_le32(offload->cd_tunneling);
+
+ desc->qw0.l2tag2 = 0;
+ desc->qw0.rsvd1 = 0;
+ desc->qw1 = cpu_to_le64(qw1);
+}
+
+/**
+ * idpf_tx_singleq_frame - Sends buffer on Tx ring using base descriptors
+ * @skb: send buffer
+ * @tx_q: queue to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_queue *tx_q)
+{
+ struct idpf_tx_offload_params offload = { };
+ struct idpf_tx_buf *first;
+ unsigned int count;
+ __be16 protocol;
+ int csum, tso;
+
+ count = idpf_tx_desc_count_required(tx_q, skb);
+ if (unlikely(!count))
+ return idpf_tx_drop_skb(tx_q, skb);
+
+ if (idpf_tx_maybe_stop_common(tx_q,
+ count + IDPF_TX_DESCS_PER_CACHE_LINE +
+ IDPF_TX_DESCS_FOR_CTX)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ protocol = vlan_get_protocol(skb);
+ if (protocol == htons(ETH_P_IP))
+ offload.tx_flags |= IDPF_TX_FLAGS_IPV4;
+ else if (protocol == htons(ETH_P_IPV6))
+ offload.tx_flags |= IDPF_TX_FLAGS_IPV6;
+
+ tso = idpf_tso(skb, &offload);
+ if (tso < 0)
+ goto out_drop;
+
+ csum = idpf_tx_singleq_csum(skb, &offload);
+ if (csum < 0)
+ goto out_drop;
+
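+ /* TSO and tunnel offload parameters are handed to hardware in a
+ * separate context descriptor placed ahead of the data descriptors
+ */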
+ if (tso || offload.cd_tunneling)
+ idpf_tx_singleq_build_ctx_desc(tx_q, &offload);
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_q->tx_buf[tx_q->next_to_use];
+ first->skb = skb;
+
+ if (tso) {
+ first->gso_segs = offload.tso_segs;
+ first->bytecount = skb->len + ((first->gso_segs - 1) * offload.tso_hdr_len);
+ } else {
+ first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->gso_segs = 1;
+ }
+ idpf_tx_singleq_map(tx_q, first, &offload);
+
+ return NETDEV_TX_OK;
+
+out_drop:
+ return idpf_tx_drop_skb(tx_q, skb);
+}
+
+/**
+ * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_queue *tx_q;
+
+ tx_q = vport->txqs[skb_get_queue_mapping(skb)];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_OK;
+ }
+
+ return idpf_tx_singleq_frame(skb, tx_q);
+}
+
+/**
+ * idpf_tx_singleq_clean - Reclaim resources from queue
+ * @tx_q: Tx queue to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if the completion budget was exhausted before the queue was
+ * fully cleaned, true otherwise.
+ */
+static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
+ int *cleaned)
+{
+ unsigned int budget = tx_q->vport->compln_clean_budget;
+ unsigned int total_bytes = 0, total_pkts = 0;
+ struct idpf_base_tx_desc *tx_desc;
+ s16 ntc = tx_q->next_to_clean;
+ struct idpf_netdev_priv *np;
+ struct idpf_tx_buf *tx_buf;
+ struct idpf_vport *vport;
+ struct netdev_queue *nq;
+ bool dont_wake;
+
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+ tx_buf = &tx_q->tx_buf[ntc];
+ ntc -= tx_q->desc_count;
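+ /* ntc is kept biased negative by desc_count so the ring wrap check
+ * below reduces to a simple !ntc test
+ */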
+
+ do {
+ struct idpf_base_tx_desc *eop_desc;
+
+ /* If this entry in the ring was used as a context descriptor,
+ * its corresponding entry in the buffer ring is marked as such.
+ * We can skip this descriptor since there is no buffer
+ * to clean.
+ */
+ if (tx_buf->ctx_entry) {
+ /* Clear this flag here to avoid stale flag values when
+ * this buffer is used for actual data in the future.
+ * There are cases where the tx_buf struct / the flags
+ * field will not be cleared before being reused.
+ */
+ tx_buf->ctx_entry = false;
+ goto fetch_next_txq_desc;
+ }
+
+ /* if next_to_watch is not set then no work pending */
+ eop_desc = (struct idpf_base_tx_desc *)tx_buf->next_to_watch;
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if the descriptor isn't done, no work yet to do */
+ if (!(eop_desc->qw1 &
+ cpu_to_le64(IDPF_TX_DESC_DTYPE_DESC_DONE)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buf->bytecount;
+ total_pkts += tx_buf->gso_segs;
+
+ napi_consume_skb(tx_buf->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buf data */
+ tx_buf->skb = NULL;
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+ if (unlikely(!ntc)) {
+ ntc -= tx_q->desc_count;
+ tx_buf = tx_q->tx_buf;
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+
+ /* update budget only if we did something */
+ budget--;
+
+fetch_next_txq_desc:
+ tx_buf++;
+ tx_desc++;
+ ntc++;
+ if (unlikely(!ntc)) {
+ ntc -= tx_q->desc_count;
+ tx_buf = tx_q->tx_buf;
+ tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ }
+ } while (likely(budget));
+
+ ntc += tx_q->desc_count;
+ tx_q->next_to_clean = ntc;
+
+ *cleaned += total_pkts;
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts);
+ u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ vport = tx_q->vport;
+ np = netdev_priv(vport->netdev);
+ nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
+
+ dont_wake = np->state != __IDPF_VPORT_UP ||
+ !netif_carrier_ok(vport->netdev);
+ __netif_txq_completed_wake(nq, total_pkts, total_bytes,
+ IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
+ dont_wake);
+
+ return !!budget;
+}
+
+/**
+ * idpf_tx_singleq_clean_all - Clean all Tx queues
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if the clean is not complete, true otherwise
+ */
+static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
+ int *cleaned)
+{
+ u16 num_txq = q_vec->num_txq;
+ bool clean_complete = true;
+ int i, budget_per_q;
+
+ budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
+ for (i = 0; i < num_txq; i++) {
+ struct idpf_queue *q;
+
+ q = q_vec->tx[i];
+ clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
+ cleaned);
+ }
+
+ return clean_complete;
+}
+
+/**
+ * idpf_rx_singleq_test_staterr - tests bits in Rx descriptor
+ * status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_ptype_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->base_wb.qword1.status_error_ptype_len &
+ cpu_to_le64(stat_err_bits));
+}
+
+/**
+ * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
+ * @rxq: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ * @ntc: next to clean
+ *
+ * Returns true if the buffer is not the last buffer of the packet (non-EOP),
+ * false if it is the last buffer.
+ */
+static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
+ union virtchnl2_rx_desc *rx_desc,
+ struct sk_buff *skb, u16 ntc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
+ return false;
+
+ return true;
+}
+
+/**
+ * idpf_rx_singleq_csum - Indicate in skb if checksum is good
+ * @rxq: Rx ring being processed
+ * @skb: skb currently being received and modified
+ * @csum_bits: checksum bits from descriptor
+ * @ptype: the packet type decoded by hardware
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded *csum_bits,
+ u16 ptype)
+{
+ struct idpf_rx_ptype_decoded decoded;
+ bool ipv4, ipv6;
+
+ /* check if Rx checksum is enabled */
+ if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM)))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (unlikely(!(csum_bits->l3l4p)))
+ return;
+
+ decoded = rxq->vport->rx_ptype_lkup[ptype];
+ if (unlikely(!(decoded.known && decoded.outer_ip)))
+ return;
+
+ ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+
+ /* Check if there were any checksum errors */
+ if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe)))
+ goto checksum_fail;
+
+ /* Device could not do any checksum offload for certain extension
+ * headers as indicated by setting IPV6EXADD bit
+ */
+ if (unlikely(ipv6 && csum_bits->ipv6exadd))
+ return;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed due to arrival speed
+ */
+ if (unlikely(csum_bits->l4e))
+ goto checksum_fail;
+
+ if (unlikely(csum_bits->nat && csum_bits->eudpe))
+ goto checksum_fail;
+
+ /* Handle packets that were not able to be checksummed due to arrival
+ * speed; in this case the stack can compute the csum.
+ */
+ if (unlikely(csum_bits->pprs))
+ return;
+
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
+ */
+ if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case IDPF_RX_PTYPE_INNER_PROT_ICMP:
+ case IDPF_RX_PTYPE_INNER_PROT_TCP:
+ case IDPF_RX_PTYPE_INNER_PROT_UDP:
+ case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ default:
+ return;
+ }
+
+checksum_fail:
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_update_end(&rxq->stats_sync);
+}
+
+/**
+ * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: Rx packet type
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
+{
+ struct idpf_rx_csum_decoded csum_bits;
+ u32 rx_error, rx_status;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
+
+ rx_status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qword);
+ rx_error = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, qword);
+
+ csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M, rx_error);
+ csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M,
+ rx_error);
+ csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M, rx_error);
+ csum_bits.pprs = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M,
+ rx_error);
+ csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M,
+ rx_status);
+ csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
+ rx_status);
+ csum_bits.nat = 0;
+ csum_bits.eudpe = 0;
+
+ idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+}
+
+/**
+ * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: the receive descriptor
+ * @ptype: Rx packet type
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
+{
+ struct idpf_rx_csum_decoded csum_bits;
+ u16 rx_status0, rx_status1;
+
+ rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->flex_nic_wb.status_error1);
+
+ csum_bits.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M,
+ rx_status0);
+ csum_bits.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M,
+ rx_status0);
+ csum_bits.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M,
+ rx_status0);
+ csum_bits.eudpe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M,
+ rx_status0);
+ csum_bits.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M,
+ rx_status0);
+ csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M,
+ rx_status0);
+ csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
+ rx_status1);
+ csum_bits.pprs = 0;
+
+ idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+}
+
+/**
+ * idpf_rx_singleq_base_hash - set the hash value in the skb
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: specific descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ u64 mask, qw1;
+
+ if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ return;
+
+ mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
+ qw1 = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
+
+ if (FIELD_GET(mask, qw1) == mask) {
+ u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
+
+ skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ }
+}
+
+/**
+ * idpf_rx_singleq_flex_hash - set the hash value in the skb
+ * @rx_q: Rx completion queue
+ * @skb: skb currently being received and modified
+ * @rx_desc: specific descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ **/
+static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ return;
+
+ if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.status_error0)))
+ skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash),
+ idpf_ptype_to_htype(decoded));
+}
+
+/**
+ * idpf_rx_singleq_process_skb_fields - Populate skb header fields from Rx
+ * descriptor
+ * @rx_q: Rx ring being processed
+ * @skb: pointer to current skb being populated
+ * @rx_desc: descriptor for skb
+ * @ptype: packet type
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
+ */
+static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
+ struct sk_buff *skb,
+ union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
+{
+ struct idpf_rx_ptype_decoded decoded =
+ rx_q->vport->rx_ptype_lkup[ptype];
+
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
+
+ /* Check if we're using base mode descriptor IDs */
+ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
+ idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
+ idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
+ } else {
+ idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
+ idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
+ }
+}
+
+/**
+ * idpf_rx_singleq_buf_hw_alloc_all - Replace used receive buffers
+ * @rx_q: queue for which the hw buffers are allocated
+ * @cleaned_count: number of buffers to replace
+ *
+ * Returns false if all allocations were successful, true if any fail
+ */
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
+ u16 cleaned_count)
+{
+ struct virtchnl2_singleq_rx_buf_desc *desc;
+ u16 nta = rx_q->next_to_alloc;
+ struct idpf_rx_buf *buf;
+
+ if (!cleaned_count)
+ return false;
+
+ desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
+ buf = &rx_q->rx_buf.buf[nta];
+
+ do {
+ dma_addr_t addr;
+
+ addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
+ if (unlikely(addr == DMA_MAPPING_ERROR))
+ break;
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ desc->pkt_addr = cpu_to_le64(addr);
+ desc->hdr_addr = 0;
+ desc++;
+
+ buf++;
+ nta++;
+ if (unlikely(nta == rx_q->desc_count)) {
+ desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
+ buf = rx_q->rx_buf.buf;
+ nta = 0;
+ }
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_q->next_to_alloc != nta) {
+ idpf_rx_buf_hw_update(rx_q, nta);
+ rx_q->next_to_alloc = nta;
+ }
+
+ return !!cleaned_count;
+}
+
+/**
+ * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
+ * @rx_q: Rx descriptor queue
+ * @rx_desc: the descriptor to process
+ * @fields: storage for extracted values
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size and Rx packet type.
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
+ * descriptor writeback format.
+ */
+static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
+{
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->base_wb.qword1.status_error_ptype_len);
+
+ fields->size = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qword);
+ fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qword);
+}
+
+/**
+ * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
+ * @rx_q: Rx descriptor queue
+ * @rx_desc: the descriptor to process
+ * @fields: storage for extracted values
+ *
+ * Decode the Rx descriptor and extract relevant information including the
+ * size and Rx packet type.
+ *
+ * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
+ * descriptor writeback format.
+ */
+static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
+{
+ fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
+ fields->rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PTYPE_M,
+ le16_to_cpu(rx_desc->flex_nic_wb.ptype_flex_flags0));
+}
+
+/**
+ * idpf_rx_singleq_extract_fields - Extract fields from the Rx descriptor
+ * @rx_q: Rx descriptor queue
+ * @rx_desc: the descriptor to process
+ * @fields: storage for extracted values
+ *
+ */
+static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
+ union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
+{
+ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
+ idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
+ else
+ idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
+}
+
+/**
+ * idpf_rx_singleq_clean - Reclaim resources after receive completes
+ * @rx_q: rx queue to clean
+ * @budget: Total limit on number of packets to process
+ *
+ * Returns the number of packets cleaned, or the full budget if a buffer
+ * allocation failure occurred (to guarantee another pass through this routine)
+ */
+static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ struct sk_buff *skb = rx_q->skb;
+ u16 ntc = rx_q->next_to_clean;
+ u16 cleaned_count = 0;
+ bool failure = false;
+
+ /* Process Rx packets bounded by budget */
+ while (likely(total_rx_pkts < (unsigned int)budget)) {
+ struct idpf_rx_extracted fields = { };
+ union virtchnl2_rx_desc *rx_desc;
+ struct idpf_rx_buf *rx_buf;
+
+ /* get the Rx desc from Rx queue based on 'next_to_clean' */
+ rx_desc = IDPF_RX_DESC(rx_q, ntc);
+
+ /* status_error_ptype_len will always be zero for unused
+ * descriptors because it's cleared in cleanup, and overlaps
+ * with hdr_addr which is always zero because packet split
+ * isn't used. If the hardware wrote DD then the length will be
+ * non-zero
+ */
+#define IDPF_RXD_DD VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M
+ if (!idpf_rx_singleq_test_staterr(rx_desc,
+ IDPF_RXD_DD))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc
+ */
+ dma_rmb();
+
+ idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
+
+ rx_buf = &rx_q->rx_buf.buf[ntc];
+ if (!fields.size) {
+ idpf_rx_put_page(rx_buf);
+ goto skip_data;
+ }
+
+ idpf_rx_sync_for_cpu(rx_buf, fields.size);
+ skb = rx_q->skb;
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, fields.size);
+ else
+ skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
+
+skip_data:
+ IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
+
+ cleaned_count++;
+
+ /* skip if it is non EOP desc */
+ if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
+ continue;
+
+#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
+ VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M)
+ if (unlikely(idpf_rx_singleq_test_staterr(rx_desc,
+ IDPF_RXD_ERR_S))) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* pad skb if needed (to make valid ethernet frame) */
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ /* protocol */
+ idpf_rx_singleq_process_skb_fields(rx_q, skb,
+ rx_desc, fields.rx_ptype);
+
+ /* send completed skb up the stack */
+ napi_gro_receive(&rx_q->q_vector->napi, skb);
+ skb = NULL;
+
+ /* update budget accounting */
+ total_rx_pkts++;
+ }
+
+ rx_q->skb = skb;
+
+ rx_q->next_to_clean = ntc;
+
+ if (cleaned_count)
+ failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
+
+ u64_stats_update_begin(&rx_q->stats_sync);
+ u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
+ u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_update_end(&rx_q->stats_sync);
+
+ /* guarantee a trip back through this routine if there was a failure */
+ return failure ? budget : (int)total_rx_pkts;
+}
+
+/**
+ * idpf_rx_singleq_clean_all - Clean all Rx queues
+ * @q_vec: queue vector
+ * @budget: Total Rx processing budget to split across the Rx queues
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if the clean is not complete, true otherwise
+ */
+static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
+ int *cleaned)
+{
+ u16 num_rxq = q_vec->num_rxq;
+ bool clean_complete = true;
+ int budget_per_q, i;
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
+ for (i = 0; i < num_rxq; i++) {
+ struct idpf_queue *rxq = q_vec->rx[i];
+ int pkts_cleaned_per_q;
+
+ pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
+
+ /* if we clean as many as budgeted, we must not be done */
+ if (pkts_cleaned_per_q >= budget_per_q)
+ clean_complete = false;
+ *cleaned += pkts_cleaned_per_q;
+ }
+
+ return clean_complete;
+}
+
+/**
+ * idpf_vport_singleq_napi_poll - NAPI handler
+ * @napi: struct from which you get q_vector
+ * @budget: budget provided by stack
+ */
+int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct idpf_q_vector *q_vector =
+ container_of(napi, struct idpf_q_vector, napi);
+ bool clean_complete;
+ int work_done = 0;
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (budget <= 0) {
+ idpf_tx_singleq_clean_all(q_vector, budget, &work_done);
+
+ return budget;
+ }
+
+ clean_complete = idpf_rx_singleq_clean_all(q_vector, budget,
+ &work_done);
+ clean_complete &= idpf_tx_singleq_clean_all(q_vector, budget,
+ &work_done);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
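+ /* napi_complete_done() expects work_done < budget; clamp in case we
+ * processed exactly budget packets while still completing all work
+ */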
+ work_done = min_t(int, work_done, budget - 1);
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ idpf_vport_intr_update_itr_ena_irq(q_vector);
+
+ return work_done;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
new file mode 100644
index 000000000000..5e1ef70d54fe
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -0,0 +1,4292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_buf_lifo_push - push a buffer pointer onto stack
+ * @stack: pointer to stack struct
+ * @buf: pointer to buf to push
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int idpf_buf_lifo_push(struct idpf_buf_lifo *stack,
+ struct idpf_tx_stash *buf)
+{
+ if (unlikely(stack->top == stack->size))
+ return -ENOSPC;
+
+ stack->bufs[stack->top++] = buf;
+
+ return 0;
+}
+
+/**
+ * idpf_buf_lifo_pop - pop a buffer pointer from stack
+ * @stack: pointer to stack struct
+ **/
+static struct idpf_tx_stash *idpf_buf_lifo_pop(struct idpf_buf_lifo *stack)
+{
+ if (unlikely(!stack->top))
+ return NULL;
+
+ return stack->bufs[--stack->top];
+}
+
+/**
+ * idpf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: TX queue
+ */
+void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct idpf_adapter *adapter = idpf_netdev_to_adapter(netdev);
+
+ adapter->tx_timeout_count++;
+
+ netdev_err(netdev, "Detected Tx timeout: Count %d, Queue %d\n",
+ adapter->tx_timeout_count, txqueue);
+ if (!idpf_is_reset_in_prog(adapter)) {
+ set_bit(IDPF_HR_FUNC_RESET, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq,
+ &adapter->vc_event_task,
+ msecs_to_jiffies(10));
+ }
+}
+
+/**
+ * idpf_tx_buf_rel - Release a Tx buffer
+ * @tx_q: the queue that owns the buffer
+ * @tx_buf: the buffer to free
+ */
+static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
+{
+ if (tx_buf->skb) {
+ if (dma_unmap_len(tx_buf, len))
+ dma_unmap_single(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(tx_buf->skb);
+ } else if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ }
+
+ tx_buf->next_to_watch = NULL;
+ tx_buf->skb = NULL;
+ tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ dma_unmap_len_set(tx_buf, len, 0);
+}
+
+/**
+ * idpf_tx_buf_rel_all - Free any empty Tx buffers
+ * @txq: queue to be cleaned
+ */
+static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
+{
+ u16 i;
+
+ /* Buffers already cleared, nothing to do */
+ if (!txq->tx_buf)
+ return;
+
+ /* Free all the Tx buffer sk_buffs */
+ for (i = 0; i < txq->desc_count; i++)
+ idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+
+ kfree(txq->tx_buf);
+ txq->tx_buf = NULL;
+
+ if (!txq->buf_stack.bufs)
+ return;
+
+ for (i = 0; i < txq->buf_stack.size; i++)
+ kfree(txq->buf_stack.bufs[i]);
+
+ kfree(txq->buf_stack.bufs);
+ txq->buf_stack.bufs = NULL;
+}
+
+/**
+ * idpf_tx_desc_rel - Free Tx resources per queue
+ * @txq: Tx descriptor ring for a specific queue
+ * @bufq: buffer q or completion q
+ *
+ * Free all transmit software resources
+ */
+static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
+{
+ if (bufq)
+ idpf_tx_buf_rel_all(txq);
+
+ if (!txq->desc_ring)
+ return;
+
+ dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
+ txq->desc_ring = NULL;
+ txq->next_to_alloc = 0;
+ txq->next_to_use = 0;
+ txq->next_to_clean = 0;
+}
+
+/**
+ * idpf_tx_desc_rel_all - Free Tx Resources for All Queues
+ * @vport: virtual port structure
+ *
+ * Free all transmit software resources
+ */
+static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
+{
+ int i, j;
+
+ if (!vport->txq_grps)
+ return;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < txq_grp->num_txq; j++)
+ idpf_tx_desc_rel(txq_grp->txqs[j], true);
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ idpf_tx_desc_rel(txq_grp->complq, false);
+ }
+}
+
+/**
+ * idpf_tx_buf_alloc_all - Allocate memory for all buffer resources
+ * @tx_q: queue for which the buffers are allocated
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
+{
+ int buf_size;
+ int i;
+
+ /* Allocate bookkeeping buffers only. Buffers to be supplied to HW
+ * are allocated by the kernel network stack and are received as part of the skb
+ */
+ buf_size = sizeof(struct idpf_tx_buf) * tx_q->desc_count;
+ tx_q->tx_buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!tx_q->tx_buf)
+ return -ENOMEM;
+
+ /* Initialize tx_bufs with invalid completion tags */
+ for (i = 0; i < tx_q->desc_count; i++)
+ tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ /* Initialize tx buf stack for out-of-order completions if
+ * flow scheduling offload is enabled
+ */
+ tx_q->buf_stack.bufs =
+ kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
+ GFP_KERNEL);
+ if (!tx_q->buf_stack.bufs)
+ return -ENOMEM;
+
+ tx_q->buf_stack.size = tx_q->desc_count;
+ tx_q->buf_stack.top = tx_q->desc_count;
+
+ for (i = 0; i < tx_q->desc_count; i++) {
+ tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
+ GFP_KERNEL);
+ if (!tx_q->buf_stack.bufs[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_tx_desc_alloc - Allocate the Tx descriptors
+ * @tx_q: the tx ring to set up
+ * @bufq: buffer or completion queue
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
+{
+ struct device *dev = tx_q->dev;
+ u32 desc_sz;
+ int err;
+
+ if (bufq) {
+ err = idpf_tx_buf_alloc_all(tx_q);
+ if (err)
+ goto err_alloc;
+
+ desc_sz = sizeof(struct idpf_base_tx_desc);
+ } else {
+ desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
+ }
+
+ tx_q->size = tx_q->desc_count * desc_sz;
+
+ /* Allocate descriptors and also round up to nearest 4K */
+ tx_q->size = ALIGN(tx_q->size, 4096);
+ tx_q->desc_ring = dmam_alloc_coherent(dev, tx_q->size, &tx_q->dma,
+ GFP_KERNEL);
+ if (!tx_q->desc_ring) {
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
+ tx_q->size);
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ tx_q->next_to_alloc = 0;
+ tx_q->next_to_use = 0;
+ tx_q->next_to_clean = 0;
+ set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
+
+ return 0;
+
+err_alloc:
+ idpf_tx_desc_rel(tx_q, bufq);
+
+ return err;
+}
+
+/**
+ * idpf_tx_desc_alloc_all - allocate all queues Tx resources
+ * @vport: virtual port private structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+ int err = 0;
+ int i, j;
+
+ /* Setup buffer queues. In the single queue model, buffer queues and
+ * completion queues are the same
+ */
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
+ struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
+ u8 gen_bits = 0;
+ u16 bufidx_mask;
+
+ err = idpf_tx_desc_alloc(txq, true);
+ if (err) {
+ dev_err(dev, "Allocation for Tx Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ txq->compl_tag_cur_gen = 0;
+
+ /* Determine the number of bits in the bufid
+ * mask and add one to get the start of the
+ * generation bits
+ */
+ bufidx_mask = txq->desc_count - 1;
+ while (bufidx_mask >> 1) {
+ txq->compl_tag_gen_s++;
+ bufidx_mask = bufidx_mask >> 1;
+ }
+ txq->compl_tag_gen_s++;
+
+ gen_bits = IDPF_TX_SPLITQ_COMPL_TAG_WIDTH -
+ txq->compl_tag_gen_s;
+ txq->compl_tag_gen_max = GETMAXVAL(gen_bits);
+
+ /* Set bufid mask based on location of first
+ * gen bit; it cannot simply be the descriptor
+ * ring size-1 since we can have size values
+ * where not all of those bits are set.
+ */
+ txq->compl_tag_bufid_m =
+ GETMAXVAL(txq->compl_tag_gen_s);
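+ /* For example, a 512 descriptor ring yields compl_tag_gen_s = 9:
+ * buffer ids occupy the low 9 bits of the completion tag and the
+ * generation value sits in the bits above them.
+ */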
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ /* Setup completion queues */
+ err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
+ if (err) {
+ dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+ }
+
+err_out:
+ if (err)
+ idpf_tx_desc_rel_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_rx_page_rel - Release an rx buffer page
+ * @rxq: the queue that owns the buffer
+ * @rx_buf: the buffer to free
+ */
+static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
+{
+ if (unlikely(!rx_buf->page))
+ return;
+
+ page_pool_put_full_page(rxq->pp, rx_buf->page, false);
+
+ rx_buf->page = NULL;
+ rx_buf->page_offset = 0;
+}
+
+/**
+ * idpf_rx_hdr_buf_rel_all - Release header buffer memory
+ * @rxq: queue to use
+ */
+static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
+{
+ struct idpf_adapter *adapter = rxq->vport->adapter;
+
+ dma_free_coherent(&adapter->pdev->dev,
+ rxq->desc_count * IDPF_HDR_BUF_SIZE,
+ rxq->rx_buf.hdr_buf_va,
+ rxq->rx_buf.hdr_buf_pa);
+ rxq->rx_buf.hdr_buf_va = NULL;
+}
+
+/**
+ * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
+ * @rxq: queue to be cleaned
+ */
+static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
+{
+ u16 i;
+
+ /* queue already cleared, nothing to do */
+ if (!rxq->rx_buf.buf)
+ return;
+
+ /* Free all the bufs allocated and given to hw on Rx queue */
+ for (i = 0; i < rxq->desc_count; i++)
+ idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
+
+ if (rxq->rx_hsplit_en)
+ idpf_rx_hdr_buf_rel_all(rxq);
+
+ page_pool_destroy(rxq->pp);
+ rxq->pp = NULL;
+
+ kfree(rxq->rx_buf.buf);
+ rxq->rx_buf.buf = NULL;
+}
+
+/**
+ * idpf_rx_desc_rel - Free a specific Rx q resources
+ * @rxq: queue to clean the resources from
+ * @bufq: buffer q or completion q
+ * @q_model: single or split q model
+ *
+ * Free a specific rx queue resources
+ */
+static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
+{
+ if (!rxq)
+ return;
+
+ if (!bufq && idpf_is_queue_model_split(q_model) && rxq->skb) {
+ dev_kfree_skb_any(rxq->skb);
+ rxq->skb = NULL;
+ }
+
+ if (bufq || !idpf_is_queue_model_split(q_model))
+ idpf_rx_buf_rel_all(rxq);
+
+ rxq->next_to_alloc = 0;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ if (!rxq->desc_ring)
+ return;
+
+ dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
+ rxq->desc_ring = NULL;
+}
+
+/**
+ * idpf_rx_desc_rel_all - Free Rx Resources for All Queues
+ * @vport: virtual port structure
+ *
+ * Free all rx queues resources
+ */
+static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
+{
+ struct idpf_rxq_group *rx_qgrp;
+ u16 num_rxq;
+ int i, j;
+
+ if (!vport->rxq_grps)
+ return;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ rx_qgrp = &vport->rxq_grps[i];
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
+ false, vport->rxq_model);
+ continue;
+ }
+
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ for (j = 0; j < num_rxq; j++)
+ idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
+ false, vport->rxq_model);
+
+ if (!rx_qgrp->splitq.bufq_sets)
+ continue;
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_bufq_set *bufq_set =
+ &rx_qgrp->splitq.bufq_sets[j];
+
+ idpf_rx_desc_rel(&bufq_set->bufq, true,
+ vport->rxq_model);
+ }
+ }
+}
+
+/**
+ * idpf_rx_buf_hw_update - Store the new tail and head values
+ * @rxq: queue to bump
+ * @val: new head index
+ */
+void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
+{
+ rxq->next_to_use = val;
+
+ if (unlikely(!rxq->tail))
+ return;
+
+ /* writel has an implicit memory barrier */
+ writel(val, rxq->tail);
+}
+
+/**
+ * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
+ * @rxq: ring to use
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
+{
+ struct idpf_adapter *adapter = rxq->vport->adapter;
+
+ rxq->rx_buf.hdr_buf_va =
+ dma_alloc_coherent(&adapter->pdev->dev,
+ IDPF_HDR_BUF_SIZE * rxq->desc_count,
+ &rxq->rx_buf.hdr_buf_pa,
+ GFP_KERNEL);
+ if (!rxq->rx_buf.hdr_buf_va)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * idpf_rx_post_buf_refill - Post buffer id to refill queue
+ * @refillq: refill queue to post to
+ * @buf_id: buffer id to post
+ */
+static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
+{
+ u16 nta = refillq->next_to_alloc;
+
+ /* store the buffer ID and the SW maintained GEN bit to the refillq */
+ refillq->ring[nta] =
+ ((buf_id << IDPF_RX_BI_BUFID_S) & IDPF_RX_BI_BUFID_M) |
+ (!!(test_bit(__IDPF_Q_GEN_CHK, refillq->flags)) <<
+ IDPF_RX_BI_GEN_S);
+
+ if (unlikely(++nta == refillq->desc_count)) {
+ nta = 0;
+ change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ }
+ refillq->next_to_alloc = nta;
+}
+
+/**
+ * idpf_rx_post_buf_desc - Post buffer to bufq descriptor ring
+ * @bufq: buffer queue to post to
+ * @buf_id: buffer id to post
+ *
+ * Returns false if buffer could not be allocated, true otherwise.
+ */
+static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
+{
+ struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
+ u16 nta = bufq->next_to_alloc;
+ struct idpf_rx_buf *buf;
+ dma_addr_t addr;
+
+ splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
+ buf = &bufq->rx_buf.buf[buf_id];
+
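+ /* Header buffers come from one contiguous coherent allocation made
+ * in idpf_rx_hdr_buf_alloc_all(), indexed here by buf_id
+ */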
+ if (bufq->rx_hsplit_en) {
+ splitq_rx_desc->hdr_addr =
+ cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
+ (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ }
+
+ addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
+ if (unlikely(addr == DMA_MAPPING_ERROR))
+ return false;
+
+ splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
+ splitq_rx_desc->qword0.buf_id = cpu_to_le16(buf_id);
+
+ nta++;
+ if (unlikely(nta == bufq->desc_count))
+ nta = 0;
+ bufq->next_to_alloc = nta;
+
+ return true;
+}
+
+/**
+ * idpf_rx_post_init_bufs - Post initial buffers to bufq
+ * @bufq: buffer queue to post working set to
+ * @working_set: number of buffers to put in working set
+ *
+ * Returns true if @working_set bufs were posted successfully, false otherwise.
+ */
+static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
+{
+ int i;
+
+ for (i = 0; i < working_set; i++) {
+ if (!idpf_rx_post_buf_desc(bufq, i))
+ return false;
+ }
+
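+ /* Tail is advanced in whole strides; round next_to_alloc down to the
+ * nearest stride boundary before writing it to hardware
+ */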
+ idpf_rx_buf_hw_update(bufq,
+ bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
+
+ return true;
+}
+
+/**
+ * idpf_rx_create_page_pool - Create a page pool
+ * @rxbufq: RX queue to create page pool for
+ *
+ * Returns &page_pool on success, an ERR_PTR-encoded negative errno on failure
+ */
+static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
+{
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = rxbufq->desc_count,
+ .nid = NUMA_NO_NODE,
+ .dev = rxbufq->vport->netdev->dev.parent,
+ .max_len = PAGE_SIZE,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ };
+
+ return page_pool_create(&pp);
+}
+
+/**
+ * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
+ * @rxbufq: queue for which the buffers are allocated; equivalent to
+ * rxq when operating in singleq mode
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
+{
+ int err = 0;
+
+ /* Allocate bookkeeping buffers */
+ rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
+ sizeof(struct idpf_rx_buf), GFP_KERNEL);
+ if (!rxbufq->rx_buf.buf) {
+ err = -ENOMEM;
+ goto rx_buf_alloc_all_out;
+ }
+
+ if (rxbufq->rx_hsplit_en) {
+ err = idpf_rx_hdr_buf_alloc_all(rxbufq);
+ if (err)
+ goto rx_buf_alloc_all_out;
+ }
+
+ /* Allocate buffers to be given to HW. */
+ if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
+ int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
+
+ if (!idpf_rx_post_init_bufs(rxbufq, working_set))
+ err = -ENOMEM;
+ } else {
+ if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
+ rxbufq->desc_count - 1))
+ err = -ENOMEM;
+ }
+
+rx_buf_alloc_all_out:
+ if (err)
+ idpf_rx_buf_rel_all(rxbufq);
+
+ return err;
+}
+
+/**
+ * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
+ * @rxbufq: RX queue to create page pool for
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
+{
+ struct page_pool *pool;
+
+ pool = idpf_rx_create_page_pool(rxbufq);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rxbufq->pp = pool;
+
+ return idpf_rx_buf_alloc_all(rxbufq);
+}
+
+/**
+ * idpf_rx_bufs_init_all - Initialize all RX bufs
+ * @vport: virtual port struct
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_rx_bufs_init_all(struct idpf_vport *vport)
+{
+ struct idpf_rxq_group *rx_qgrp;
+ struct idpf_queue *q;
+ int i, j, err;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ rx_qgrp = &vport->rxq_grps[i];
+
+ /* Allocate bufs for the rxq itself in singleq */
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ int num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ q = rx_qgrp->singleq.rxqs[j];
+ err = idpf_rx_bufs_init(q);
+ if (err)
+ return err;
+ }
+
+ continue;
+ }
+
+ /* Otherwise, allocate bufs for the buffer queues */
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ err = idpf_rx_bufs_init(q);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_rx_desc_alloc - Allocate queue Rx resources
+ * @rxq: Rx queue for which the resources are setup
+ * @bufq: buffer or completion queue
+ * @q_model: single or split queue model
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
+{
+ struct device *dev = rxq->dev;
+
+ if (bufq)
+ rxq->size = rxq->desc_count *
+ sizeof(struct virtchnl2_splitq_rx_buf_desc);
+ else
+ rxq->size = rxq->desc_count *
+ sizeof(union virtchnl2_rx_desc);
+
+ /* Allocate descriptors and also round up to nearest 4K */
+ rxq->size = ALIGN(rxq->size, 4096);
+ rxq->desc_ring = dmam_alloc_coherent(dev, rxq->size,
+ &rxq->dma, GFP_KERNEL);
+ if (!rxq->desc_ring) {
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ rxq->size);
+ return -ENOMEM;
+ }
+
+ rxq->next_to_alloc = 0;
+ rxq->next_to_clean = 0;
+ rxq->next_to_use = 0;
+ set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_desc_alloc_all - allocate all RX queues resources
+ * @vport: virtual port structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+ struct idpf_rxq_group *rx_qgrp;
+ struct idpf_queue *q;
+ int i, j, err;
+ u16 num_rxq;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ rx_qgrp = &vport->rxq_grps[i];
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
+ if (err) {
+ dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+ }
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ continue;
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
+ if (err) {
+ dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
+ i);
+ goto err_out;
+ }
+ }
+ }
+
+ return 0;
+
+err_out:
+ idpf_rx_desc_rel_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_txq_group_rel - Release all resources for txq groups
+ * @vport: vport to release txq groups on
+ */
+static void idpf_txq_group_rel(struct idpf_vport *vport)
+{
+ int i, j;
+
+ if (!vport->txq_grps)
+ return;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < txq_grp->num_txq; j++) {
+ kfree(txq_grp->txqs[j]);
+ txq_grp->txqs[j] = NULL;
+ }
+ kfree(txq_grp->complq);
+ txq_grp->complq = NULL;
+ }
+ kfree(vport->txq_grps);
+ vport->txq_grps = NULL;
+}
+
+/**
+ * idpf_rxq_sw_queue_rel - Release software queue resources
+ * @rx_qgrp: rx queue group with software queues
+ */
+static void idpf_rxq_sw_queue_rel(struct idpf_rxq_group *rx_qgrp)
+{
+ int i, j;
+
+ for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) {
+ struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[i];
+
+ for (j = 0; j < bufq_set->num_refillqs; j++) {
+ kfree(bufq_set->refillqs[j].ring);
+ bufq_set->refillqs[j].ring = NULL;
+ }
+ kfree(bufq_set->refillqs);
+ bufq_set->refillqs = NULL;
+ }
+}
+
+/**
+ * idpf_rxq_group_rel - Release all resources for rxq groups
+ * @vport: vport to release rxq groups on
+ */
+static void idpf_rxq_group_rel(struct idpf_vport *vport)
+{
+ int i;
+
+ if (!vport->rxq_grps)
+ return;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+ int j;
+
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ for (j = 0; j < num_rxq; j++) {
+ kfree(rx_qgrp->splitq.rxq_sets[j]);
+ rx_qgrp->splitq.rxq_sets[j] = NULL;
+ }
+
+ idpf_rxq_sw_queue_rel(rx_qgrp);
+ kfree(rx_qgrp->splitq.bufq_sets);
+ rx_qgrp->splitq.bufq_sets = NULL;
+ } else {
+ num_rxq = rx_qgrp->singleq.num_rxq;
+ for (j = 0; j < num_rxq; j++) {
+ kfree(rx_qgrp->singleq.rxqs[j]);
+ rx_qgrp->singleq.rxqs[j] = NULL;
+ }
+ }
+ }
+ kfree(vport->rxq_grps);
+ vport->rxq_grps = NULL;
+}
+
+/**
+ * idpf_vport_queue_grp_rel_all - Release all queue groups
+ * @vport: vport to release queue groups for
+ */
+static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
+{
+ idpf_txq_group_rel(vport);
+ idpf_rxq_group_rel(vport);
+}
+
+/**
+ * idpf_vport_queues_rel - Free memory for all queues
+ * @vport: virtual port
+ *
+ * Free the memory allocated for queues associated to a vport
+ */
+void idpf_vport_queues_rel(struct idpf_vport *vport)
+{
+ idpf_tx_desc_rel_all(vport);
+ idpf_rx_desc_rel_all(vport);
+ idpf_vport_queue_grp_rel_all(vport);
+
+ kfree(vport->txqs);
+ vport->txqs = NULL;
+}
+
+/**
+ * idpf_vport_init_fast_path_txqs - Initialize fast path txq array
+ * @vport: vport to init txqs on
+ *
+ * We get a queue index from skb->queue_mapping and we need a fast way to
+ * dereference the queue from queue groups. This allows us to quickly pull a
+ * txq based on a queue index.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
+{
+ int i, j, k = 0;
+
+ vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+
+ if (!vport->txqs)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_grp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_grp->num_txq; j++, k++) {
+ vport->txqs[k] = tx_grp->txqs[j];
+ vport->txqs[k]->idx = k;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_vport_init_num_qs - Initialize number of queues
+ * @vport: vport to initialize queues
+ * @vport_msg: data to be filled into vport
+ */
+void idpf_vport_init_num_qs(struct idpf_vport *vport,
+ struct virtchnl2_create_vport *vport_msg)
+{
+ struct idpf_vport_user_config_data *config_data;
+ u16 idx = vport->idx;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ vport->num_txq = le16_to_cpu(vport_msg->num_tx_q);
+ vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q);
+ /* The number of txqs and rxqs in config data will be zero only in the
+ * driver load path, and we don't update them thereafter
+ */
+ if (!config_data->num_req_tx_qs && !config_data->num_req_rx_qs) {
+ config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
+ config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq);
+
+ /* Adjust number of buffer queues per Rx queue group. */
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ vport->num_bufqs_per_qgrp = 0;
+ vport->bufq_size[0] = IDPF_RX_BUF_2048;
+
+ return;
+ }
+
+ vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ /* Bufq[0] default buffer size is 4K
+ * Bufq[1] default buffer size is 2K
+ */
+ vport->bufq_size[0] = IDPF_RX_BUF_4096;
+ vport->bufq_size[1] = IDPF_RX_BUF_2048;
+}
+
+/**
+ * idpf_vport_calc_num_q_desc - Calculate number of descriptors per queue
+ * @vport: vport to calculate descriptor counts for
+ */
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport)
+{
+ struct idpf_vport_user_config_data *config_data;
+ int num_bufqs = vport->num_bufqs_per_qgrp;
+ u32 num_req_txq_desc, num_req_rxq_desc;
+ u16 idx = vport->idx;
+ int i;
+
+ config_data = &vport->adapter->vport_config[idx]->user_config;
+ num_req_txq_desc = config_data->num_req_txq_desc;
+ num_req_rxq_desc = config_data->num_req_rxq_desc;
+
+ vport->complq_desc_count = 0;
+ if (num_req_txq_desc) {
+ vport->txq_desc_count = num_req_txq_desc;
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ vport->complq_desc_count = num_req_txq_desc;
+ if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC)
+ vport->complq_desc_count =
+ IDPF_MIN_TXQ_COMPLQ_DESC;
+ }
+ } else {
+ vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->complq_desc_count =
+ IDPF_DFLT_TX_COMPLQ_DESC_COUNT;
+ }
+
+ if (num_req_rxq_desc)
+ vport->rxq_desc_count = num_req_rxq_desc;
+ else
+ vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT;
+
+ for (i = 0; i < num_bufqs; i++) {
+ if (!vport->bufq_desc_count[i])
+ vport->bufq_desc_count[i] =
+ IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count,
+ num_bufqs);
+ }
+}
+
+/**
+ * idpf_vport_calc_total_qs - Calculate total number of queues
+ * @adapter: private data struct
+ * @vport_idx: vport idx to retrieve vport pointer
+ * @vport_msg: message to fill with data
+ * @max_q: vport max queue info
+ *
+ * Return 0 on success, error value on failure.
+ */
+int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_idx,
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_vport_max_q *max_q)
+{
+ int dflt_splitq_txq_grps = 0, dflt_singleq_txqs = 0;
+ int dflt_splitq_rxq_grps = 0, dflt_singleq_rxqs = 0;
+ u16 num_req_tx_qs = 0, num_req_rx_qs = 0;
+ struct idpf_vport_config *vport_config;
+ u16 num_txq_grps, num_rxq_grps;
+ u32 num_qs;
+
+ vport_config = adapter->vport_config[vport_idx];
+ if (vport_config) {
+ num_req_tx_qs = vport_config->user_config.num_req_tx_qs;
+ num_req_rx_qs = vport_config->user_config.num_req_rx_qs;
+ } else {
+ int num_cpus;
+
+ /* Restrict num of queues to cpus online as a default
+ * configuration to give best performance. User can always
+ * override to a max number of queues via ethtool.
+ */
+ num_cpus = num_online_cpus();
+
+ dflt_splitq_txq_grps = min_t(int, max_q->max_txq, num_cpus);
+ dflt_singleq_txqs = min_t(int, max_q->max_txq, num_cpus);
+ dflt_splitq_rxq_grps = min_t(int, max_q->max_rxq, num_cpus);
+ dflt_singleq_rxqs = min_t(int, max_q->max_rxq, num_cpus);
+ }
+
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->txq_model))) {
+ num_txq_grps = num_req_tx_qs ? num_req_tx_qs : dflt_splitq_txq_grps;
+ vport_msg->num_tx_complq = cpu_to_le16(num_txq_grps *
+ IDPF_COMPLQ_PER_GROUP);
+ vport_msg->num_tx_q = cpu_to_le16(num_txq_grps *
+ IDPF_DFLT_SPLITQ_TXQ_PER_GROUP);
+ } else {
+ num_txq_grps = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+ num_qs = num_txq_grps * (num_req_tx_qs ? num_req_tx_qs :
+ dflt_singleq_txqs);
+ vport_msg->num_tx_q = cpu_to_le16(num_qs);
+ vport_msg->num_tx_complq = 0;
+ }
+ if (idpf_is_queue_model_split(le16_to_cpu(vport_msg->rxq_model))) {
+ num_rxq_grps = num_req_rx_qs ? num_req_rx_qs : dflt_splitq_rxq_grps;
+ vport_msg->num_rx_bufq = cpu_to_le16(num_rxq_grps *
+ IDPF_MAX_BUFQS_PER_RXQ_GRP);
+ vport_msg->num_rx_q = cpu_to_le16(num_rxq_grps *
+ IDPF_DFLT_SPLITQ_RXQ_PER_GROUP);
+ } else {
+ num_rxq_grps = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+ num_qs = num_rxq_grps * (num_req_rx_qs ? num_req_rx_qs :
+ dflt_singleq_rxqs);
+ vport_msg->num_rx_q = cpu_to_le16(num_qs);
+ vport_msg->num_rx_bufq = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_vport_calc_num_q_groups - Calculate number of queue groups
+ * @vport: vport to calculate q groups for
+ */
+void idpf_vport_calc_num_q_groups(struct idpf_vport *vport)
+{
+ if (idpf_is_queue_model_split(vport->txq_model))
+ vport->num_txq_grp = vport->num_txq;
+ else
+ vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ vport->num_rxq_grp = vport->num_rxq;
+ else
+ vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS;
+}
+
+/**
+ * idpf_vport_calc_numq_per_grp - Calculate number of queues per group
+ * @vport: vport to calculate queues for
+ * @num_txq: return parameter for number of TX queues
+ * @num_rxq: return parameter for number of RX queues
+ */
+static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
+ u16 *num_txq, u16 *num_rxq)
+{
+ if (idpf_is_queue_model_split(vport->txq_model))
+ *num_txq = IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
+ else
+ *num_txq = vport->num_txq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ *num_rxq = IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
+ else
+ *num_rxq = vport->num_rxq;
+}
+
+/**
+ * idpf_rxq_set_descids - set the descids supported by this queue
+ * @vport: virtual port data structure
+ * @q: rx queue for which descids are set
+ *
+ */
+static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
+{
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+ } else {
+ if (vport->base_rxd)
+ q->rxdids = VIRTCHNL2_RXDID_1_32B_BASE_M;
+ else
+ q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+ }
+}
+
+/**
+ * idpf_txq_group_alloc - Allocate all txq group resources
+ * @vport: vport to allocate txq groups for
+ * @num_txq: number of txqs to allocate for each group
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
+{
+ bool flow_sch_en;
+ int err, i;
+
+ vport->txq_grps = kcalloc(vport->num_txq_grp,
+ sizeof(*vport->txq_grps), GFP_KERNEL);
+ if (!vport->txq_grps)
+ return -ENOMEM;
+
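+ /* Flow scheduling is enabled unless the device advertises queue-based
+ * scheduling (VIRTCHNL2_CAP_SPLITQ_QSCHED), hence the negated check.
+ */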
+ flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ struct idpf_adapter *adapter = vport->adapter;
+ int j;
+
+ tx_qgrp->vport = vport;
+ tx_qgrp->num_txq = num_txq;
+
+ for (j = 0; j < tx_qgrp->num_txq; j++) {
+ tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
+ GFP_KERNEL);
+ if (!tx_qgrp->txqs[j]) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+
+ for (j = 0; j < tx_qgrp->num_txq; j++) {
+ struct idpf_queue *q = tx_qgrp->txqs[j];
+
+ q->dev = &adapter->pdev->dev;
+ q->desc_count = vport->txq_desc_count;
+ q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
+ q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
+ q->vport = vport;
+ q->txq_grp = tx_qgrp;
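+ /* Hash table used to stash flow-scheduled buffers whose
+ * completions arrive out of order; see
+ * idpf_stash_flow_sch_buffers() and idpf_tx_clean_stashed_bufs().
+ */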
+ hash_init(q->sched_buf_hash);
+
+ if (flow_sch_en)
+ set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
+ sizeof(*tx_qgrp->complq),
+ GFP_KERNEL);
+ if (!tx_qgrp->complq) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ tx_qgrp->complq->dev = &adapter->pdev->dev;
+ tx_qgrp->complq->desc_count = vport->complq_desc_count;
+ tx_qgrp->complq->vport = vport;
+ tx_qgrp->complq->txq_grp = tx_qgrp;
+
+ if (flow_sch_en)
+ __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
+ }
+
+ return 0;
+
+err_alloc:
+ idpf_txq_group_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_rxq_group_alloc - Allocate all rxq group resources
+ * @vport: vport to allocate rxq groups for
+ * @num_rxq: number of rxqs to allocate for each group
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue *q;
+ int i, k, err = 0;
+
+ vport->rxq_grps = kcalloc(vport->num_rxq_grp,
+ sizeof(struct idpf_rxq_group), GFP_KERNEL);
+ if (!vport->rxq_grps)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ int j;
+
+ rx_qgrp->vport = vport;
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ rx_qgrp->singleq.num_rxq = num_rxq;
+ for (j = 0; j < num_rxq; j++) {
+ rx_qgrp->singleq.rxqs[j] =
+ kzalloc(sizeof(*rx_qgrp->singleq.rxqs[j]),
+ GFP_KERNEL);
+ if (!rx_qgrp->singleq.rxqs[j]) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+ goto skip_splitq_rx_init;
+ }
+ rx_qgrp->splitq.num_rxq_sets = num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ rx_qgrp->splitq.rxq_sets[j] =
+ kzalloc(sizeof(struct idpf_rxq_set),
+ GFP_KERNEL);
+ if (!rx_qgrp->splitq.rxq_sets[j]) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+
+ rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
+ sizeof(struct idpf_bufq_set),
+ GFP_KERNEL);
+ if (!rx_qgrp->splitq.bufq_sets) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_bufq_set *bufq_set =
+ &rx_qgrp->splitq.bufq_sets[j];
+ int swq_size = sizeof(struct idpf_sw_queue);
+
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ q->dev = &adapter->pdev->dev;
+ q->desc_count = vport->bufq_desc_count[j];
+ q->vport = vport;
+ q->rxq_grp = rx_qgrp;
+ q->idx = j;
+ q->rx_buf_size = vport->bufq_size[j];
+ q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
+ q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
+ if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT) &&
+ idpf_is_queue_model_split(vport->rxq_model)) {
+ q->rx_hsplit_en = true;
+ q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
+ }
+
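+ /* One software refill queue per RXQ for this buffer queue; each
+ * refillq is a simple ring of u16 buffer ids.
+ */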
+ bufq_set->num_refillqs = num_rxq;
+ bufq_set->refillqs = kcalloc(num_rxq, swq_size,
+ GFP_KERNEL);
+ if (!bufq_set->refillqs) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ for (k = 0; k < bufq_set->num_refillqs; k++) {
+ struct idpf_sw_queue *refillq =
+ &bufq_set->refillqs[k];
+
+ refillq->dev = &vport->adapter->pdev->dev;
+ refillq->desc_count =
+ vport->bufq_desc_count[j];
+ set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ refillq->ring = kcalloc(refillq->desc_count,
+ sizeof(u16),
+ GFP_KERNEL);
+ if (!refillq->ring) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+ }
+ }
+
+skip_splitq_rx_init:
+ for (j = 0; j < num_rxq; j++) {
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ q = rx_qgrp->singleq.rxqs[j];
+ goto setup_rxq;
+ }
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ rx_qgrp->splitq.rxq_sets[j]->refillq0 =
+ &rx_qgrp->splitq.bufq_sets[0].refillqs[j];
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
+ rx_qgrp->splitq.rxq_sets[j]->refillq1 =
+ &rx_qgrp->splitq.bufq_sets[1].refillqs[j];
+
+ if (idpf_is_cap_ena_all(adapter, IDPF_HSPLIT_CAPS,
+ IDPF_CAP_HSPLIT) &&
+ idpf_is_queue_model_split(vport->rxq_model)) {
+ q->rx_hsplit_en = true;
+ q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
+ }
+
+setup_rxq:
+ q->dev = &adapter->pdev->dev;
+ q->desc_count = vport->rxq_desc_count;
+ q->vport = vport;
+ q->rxq_grp = rx_qgrp;
+ q->idx = (i * num_rxq) + j;
+ /* In splitq mode, RXQ buffer size should be
+ * set to that of the first buffer queue
+ * associated with this RXQ
+ */
+ q->rx_buf_size = vport->bufq_size[0];
+ q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
+ q->rx_max_pkt_size = vport->netdev->mtu +
+ IDPF_PACKET_HDR_PAD;
+ idpf_rxq_set_descids(vport, q);
+ }
+ }
+
+err_alloc:
+ if (err)
+ idpf_rxq_group_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_vport_queue_grp_alloc_all - Allocate all queue groups/resources
+ * @vport: vport with qgrps to allocate
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport)
+{
+ u16 num_txq, num_rxq;
+ int err;
+
+ idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq);
+
+ err = idpf_txq_group_alloc(vport, num_txq);
+ if (err)
+ goto err_out;
+
+ err = idpf_rxq_group_alloc(vport, num_rxq);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ idpf_vport_queue_grp_rel_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_vport_queues_alloc - Allocate memory for all queues
+ * @vport: virtual port
+ *
+ * Allocate memory for queues associated with a vport. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_vport_queues_alloc(struct idpf_vport *vport)
+{
+ int err;
+
+ err = idpf_vport_queue_grp_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_tx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_rx_desc_alloc_all(vport);
+ if (err)
+ goto err_out;
+
+ err = idpf_vport_init_fast_path_txqs(vport);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ idpf_vport_queues_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_tx_handle_sw_marker - Handle queue marker packet
+ * @tx_q: tx queue to handle software marker
+ */
+static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
+{
+ struct idpf_vport *vport = tx_q->vport;
+ int i;
+
+ clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+ /* Hardware must write marker packets to all queues associated with
+ * completion queues, so check if all queues received marker packets.
+ */
+ for (i = 0; i < vport->num_txq; i++)
+ /* If we're still waiting on any other TXQ marker completions,
+ * just return now since we cannot wake up the marker_wq yet.
+ */
+ if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
+ return;
+
+ /* Drain complete */
+ set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
+ wake_up(&vport->sw_marker_wq);
+}
+
+/**
+ * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
+ * packet
+ * @tx_q: tx queue to clean buffer from
+ * @tx_buf: buffer to be cleaned
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @napi_budget: Used to determine if we are in netpoll
+ */
+static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
+ struct idpf_tx_buf *tx_buf,
+ struct idpf_cleaned_stats *cleaned,
+ int napi_budget)
+{
+ napi_consume_skb(tx_buf->skb, napi_budget);
+
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_single(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+
+ /* clear tx_buf data */
+ tx_buf->skb = NULL;
+
+ cleaned->bytes += tx_buf->bytecount;
+ cleaned->packets += tx_buf->gso_segs;
+}
+
+/**
+ * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
+ * out of order completions
+ * @txq: queue to clean
+ * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @budget: Used to determine if we are in netpoll
+ */
+static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
+ struct idpf_cleaned_stats *cleaned,
+ int budget)
+{
+ struct idpf_tx_stash *stash;
+ struct hlist_node *tmp_buf;
+
+ /* Buffer completion */
+ hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
+ hlist, compl_tag) {
+ if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ continue;
+
+ if (stash->buf.skb) {
+ idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
+ budget);
+ } else if (dma_unmap_len(&stash->buf, len)) {
+ dma_unmap_page(txq->dev,
+ dma_unmap_addr(&stash->buf, dma),
+ dma_unmap_len(&stash->buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(&stash->buf, len, 0);
+ }
+
+ /* Push shadow buf back onto stack */
+ idpf_buf_lifo_push(&txq->buf_stack, stash);
+
+ hash_del(&stash->hlist);
+ }
+}
+
+/**
+ * idpf_stash_flow_sch_buffers - store buffer parameter info to be freed at a
+ * later time (only relevant for flow scheduling mode)
+ * @txq: Tx queue to clean
+ * @tx_buf: buffer to store
+ */
+static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
+ struct idpf_tx_buf *tx_buf)
+{
+ struct idpf_tx_stash *stash;
+
+ if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
+ !dma_unmap_len(tx_buf, len)))
+ return 0;
+
+ stash = idpf_buf_lifo_pop(&txq->buf_stack);
+ if (unlikely(!stash)) {
+ net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
+ txq->vport->netdev->name);
+
+ return -ENOMEM;
+ }
+
+ /* Store buffer params in shadow buffer */
+ stash->buf.skb = tx_buf->skb;
+ stash->buf.bytecount = tx_buf->bytecount;
+ stash->buf.gso_segs = tx_buf->gso_segs;
+ dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
+ dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
+ stash->buf.compl_tag = tx_buf->compl_tag;
+
+ /* Add buffer to buf_hash table to be freed later */
+ hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
+
+ memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+
+ /* Reinitialize buf_id portion of tag */
+ tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ return 0;
+}
+
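+/* In the splitq clean path, ntc is kept as a negative offset from the end of
+ * the ring (ntc -= desc_count), so the wrap check below only needs to compare
+ * against zero; the real index is restored by adding desc_count back at the
+ * end of the clean.
+ */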
+#define idpf_tx_splitq_clean_bump_ntc(txq, ntc, desc, buf) \
+do { \
+ (ntc)++; \
+ if (unlikely(!(ntc))) { \
+ ntc -= (txq)->desc_count; \
+ buf = (txq)->tx_buf; \
+ desc = IDPF_FLEX_TX_DESC(txq, 0); \
+ } else { \
+ (buf)++; \
+ (desc)++; \
+ } \
+} while (0)
+
+/**
+ * idpf_tx_splitq_clean - Reclaim resources from buffer queue
+ * @tx_q: Tx queue to clean
+ * @end: queue index until which it should be cleaned
+ * @napi_budget: Used to determine if we are in netpoll
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @descs_only: true if queue is using flow-based scheduling and should
+ * not clean buffers at this time
+ *
+ * Cleans the queue descriptor ring. If the queue is using queue-based
+ * scheduling, the buffers will be cleaned as well. If the queue is using
+ * flow-based scheduling, only the descriptors are cleaned at this time.
+ * Separate packet completion events will be reported on the completion queue,
+ * and the buffers will be cleaned separately. The stats are not updated from
+ * this function when using flow-based scheduling.
+ */
+static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
+ int napi_budget,
+ struct idpf_cleaned_stats *cleaned,
+ bool descs_only)
+{
+ union idpf_tx_flex_desc *next_pending_desc = NULL;
+ union idpf_tx_flex_desc *tx_desc;
+ s16 ntc = tx_q->next_to_clean;
+ struct idpf_tx_buf *tx_buf;
+
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
+ next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+ tx_buf = &tx_q->tx_buf[ntc];
+ ntc -= tx_q->desc_count;
+
+ while (tx_desc != next_pending_desc) {
+ union idpf_tx_flex_desc *eop_desc;
+
+ /* If this entry in the ring was used as a context descriptor,
+ * its corresponding entry in the buffer ring will have an
+ * invalid completion tag since no buffer was used. We can
+ * skip this descriptor since there is no buffer to clean.
+ */
+ if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ goto fetch_next_txq_desc;
+
+ eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buf->next_to_watch = NULL;
+
+ if (descs_only) {
+ if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
+ goto tx_splitq_clean_out;
+
+ while (tx_desc != eop_desc) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
+
+ if (dma_unmap_len(tx_buf, len)) {
+ if (idpf_stash_flow_sch_buffers(tx_q,
+ tx_buf))
+ goto tx_splitq_clean_out;
+ }
+ }
+ } else {
+ idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
+ napi_budget);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc,
+ tx_desc, tx_buf);
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(tx_q->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+ }
+ }
+
+fetch_next_txq_desc:
+ idpf_tx_splitq_clean_bump_ntc(tx_q, ntc, tx_desc, tx_buf);
+ }
+
+tx_splitq_clean_out:
+ ntc += tx_q->desc_count;
+ tx_q->next_to_clean = ntc;
+}
+
+#define idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, buf) \
+do { \
+ (buf)++; \
+ (ntc)++; \
+ if (unlikely((ntc) == (txq)->desc_count)) { \
+ buf = (txq)->tx_buf; \
+ ntc = 0; \
+ } \
+} while (0)
+
+/**
+ * idpf_tx_clean_buf_ring - clean flow scheduling TX queue buffers
+ * @txq: queue to clean
+ * @compl_tag: completion tag of packet to clean (from completion descriptor)
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @budget: Used to determine if we are in netpoll
+ *
+ * Cleans all buffers associated with the input completion tag either from the
+ * TX buffer ring or from the hash table if the buffers were previously
+ * stashed. The byte/segment counts for the cleaned packet associated with
+ * this completion tag are reported via @cleaned.
+ *
+ * Returns true if any buffers were cleaned on the ring, false otherwise.
+ */
+static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
+ struct idpf_cleaned_stats *cleaned,
+ int budget)
+{
+ u16 idx = compl_tag & txq->compl_tag_bufid_m;
+ struct idpf_tx_buf *tx_buf = NULL;
+ u16 ntc = txq->next_to_clean;
+ u16 num_descs_cleaned = 0;
+ u16 orig_idx = idx;
+
+ tx_buf = &txq->tx_buf[idx];
+
+ while (tx_buf->compl_tag == (int)compl_tag) {
+ if (tx_buf->skb) {
+ idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
+ } else if (dma_unmap_len(tx_buf, len)) {
+ dma_unmap_page(txq->dev,
+ dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ }
+
+ memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ num_descs_cleaned++;
+ idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
+ }
+
+ /* If we didn't clean anything on the ring for this completion, there's
+ * nothing more to do.
+ */
+ if (unlikely(!num_descs_cleaned))
+ return false;
+
+ /* Otherwise, if we did clean a packet on the ring directly, it's safe
+ * to assume that the descriptors starting from the original
+ * next_to_clean up until the previously cleaned packet can be reused.
+ * Therefore, we will go back in the ring and stash any buffers still
+ * in the ring into the hash table to be cleaned later.
+ */
+ tx_buf = &txq->tx_buf[ntc];
+ while (tx_buf != &txq->tx_buf[orig_idx]) {
+ idpf_stash_flow_sch_buffers(txq, tx_buf);
+ idpf_tx_clean_buf_ring_bump_ntc(txq, ntc, tx_buf);
+ }
+
+ /* Finally, update next_to_clean to reflect the work that was just done
+ * on the ring, if any. If the packet was only cleaned from the hash
+ * table, the ring will not be impacted, so we should not touch
+ * next_to_clean; the updated idx is used here.
+ */
+ txq->next_to_clean = idx;
+
+ return true;
+}
+
+/**
+ * idpf_tx_handle_rs_completion - clean a single packet and all of its buffers
+ * whether on the buffer ring or in the hash table
+ * @txq: Tx ring to clean
+ * @desc: pointer to completion queue descriptor to extract completion
+ * information from
+ * @cleaned: pointer to stats struct to track cleaned packets/bytes
+ * @budget: Used to determine if we are in netpoll
+ *
+ * The cleaned bytes/packets are reported via @cleaned.
+ */
+static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
+ struct idpf_splitq_tx_compl_desc *desc,
+ struct idpf_cleaned_stats *cleaned,
+ int budget)
+{
+ u16 compl_tag;
+
+ if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
+ u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
+
+ return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
+ }
+
+ compl_tag = le16_to_cpu(desc->q_head_compl_tag.compl_tag);
+
+ /* If we didn't clean anything on the ring, this packet must be
+ * in the hash table. Go clean it there.
+ */
+ if (!idpf_tx_clean_buf_ring(txq, compl_tag, cleaned, budget))
+ idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
+}
+
+/**
+ * idpf_tx_clean_complq - Reclaim resources on completion queue
+ * @complq: Tx ring to clean
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns true if there's any budget left (i.e. the clean is finished)
+ */
+static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
+ int *cleaned)
+{
+ struct idpf_splitq_tx_compl_desc *tx_desc;
+ struct idpf_vport *vport = complq->vport;
+ s16 ntc = complq->next_to_clean;
+ struct idpf_netdev_priv *np;
+ unsigned int complq_budget;
+ bool complq_ok = true;
+ int i;
+
+ complq_budget = vport->compln_clean_budget;
+ tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+ ntc -= complq->desc_count;
+
+ do {
+ struct idpf_cleaned_stats cleaned_stats = { };
+ struct idpf_queue *tx_q;
+ int rel_tx_qid;
+ u16 hw_head;
+ u8 ctype; /* completion type */
+ u16 gen;
+
+ /* if the descriptor isn't done, no work yet to do */
+ gen = (le16_to_cpu(tx_desc->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+ if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
+ break;
+
+ /* Find necessary info of TX queue to clean buffers */
+ rel_tx_qid = (le16_to_cpu(tx_desc->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+ if (rel_tx_qid >= complq->txq_grp->num_txq ||
+ !complq->txq_grp->txqs[rel_tx_qid]) {
+ dev_err(&complq->vport->adapter->pdev->dev,
+ "TxQ not found\n");
+ goto fetch_next_desc;
+ }
+ tx_q = complq->txq_grp->txqs[rel_tx_qid];
+
+ /* Determine completion type */
+ ctype = (le16_to_cpu(tx_desc->qid_comptype_gen) &
+ IDPF_TXD_COMPLQ_COMPL_TYPE_M) >>
+ IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+ switch (ctype) {
+ case IDPF_TXD_COMPLT_RE:
+ hw_head = le16_to_cpu(tx_desc->q_head_compl_tag.q_head);
+
+ idpf_tx_splitq_clean(tx_q, hw_head, budget,
+ &cleaned_stats, true);
+ break;
+ case IDPF_TXD_COMPLT_RS:
+ idpf_tx_handle_rs_completion(tx_q, tx_desc,
+ &cleaned_stats, budget);
+ break;
+ case IDPF_TXD_COMPLT_SW_MARKER:
+ idpf_tx_handle_sw_marker(tx_q);
+ break;
+ default:
+ dev_err(&tx_q->vport->adapter->pdev->dev,
+ "Unknown TX completion type: %d\n",
+ ctype);
+ goto fetch_next_desc;
+ }
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
+ u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
+ tx_q->cleaned_pkts += cleaned_stats.packets;
+ tx_q->cleaned_bytes += cleaned_stats.bytes;
+ complq->num_completions++;
+ u64_stats_update_end(&tx_q->stats_sync);
+
+fetch_next_desc:
+ tx_desc++;
+ ntc++;
+ if (unlikely(!ntc)) {
+ ntc -= complq->desc_count;
+ tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
+ change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+ }
+
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ complq_budget--;
+ } while (likely(complq_budget));
+
+ /* Store the state of the complq to be used later in deciding if a
+ * TXQ can be started again
+ */
+ if (unlikely(IDPF_TX_COMPLQ_PENDING(complq->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
+ complq_ok = false;
+
+ np = netdev_priv(complq->vport->netdev);
+ for (i = 0; i < complq->txq_grp->num_txq; ++i) {
+ struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
+ struct netdev_queue *nq;
+ bool dont_wake;
+
+ /* We didn't clean anything on this queue, move along */
+ if (!tx_q->cleaned_bytes)
+ continue;
+
+ *cleaned += tx_q->cleaned_pkts;
+
+ /* Update BQL */
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+
+ dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
+ np->state != __IDPF_VPORT_UP ||
+ !netif_carrier_ok(tx_q->vport->netdev);
+ /* Check if the TXQ needs to and can be restarted */
+ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
+ IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
+ dont_wake);
+
+ /* Reset cleaned stats for the next time this queue is
+ * cleaned
+ */
+ tx_q->cleaned_bytes = 0;
+ tx_q->cleaned_pkts = 0;
+ }
+
+ ntc += complq->desc_count;
+ complq->next_to_clean = ntc;
+
+ return !!complq_budget;
+}
+
+/**
+ * idpf_tx_splitq_build_ctb - populate command tag and size for queue
+ * based scheduling descriptors
+ * @desc: descriptor to populate
+ * @params: pointer to tx params struct
+ * @td_cmd: command to be filled in desc
+ * @size: size of buffer
+ */
+void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size)
+{
+ desc->q.qw1.cmd_dtype =
+ cpu_to_le16(params->dtype & IDPF_FLEX_TXD_QW1_DTYPE_M);
+ desc->q.qw1.cmd_dtype |=
+ cpu_to_le16((td_cmd << IDPF_FLEX_TXD_QW1_CMD_S) &
+ IDPF_FLEX_TXD_QW1_CMD_M);
+ desc->q.qw1.buf_size = cpu_to_le16((u16)size);
+ desc->q.qw1.l2tags.l2tag1 = cpu_to_le16(params->td_tag);
+}
+
+/**
+ * idpf_tx_splitq_build_flow_desc - populate command tag and size for flow
+ * scheduling descriptors
+ * @desc: descriptor to populate
+ * @params: pointer to tx params struct
+ * @td_cmd: command to be filled in desc
+ * @size: size of buffer
+ */
+void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size)
+{
+ desc->flow.qw1.cmd_dtype = (u16)params->dtype | td_cmd;
+ desc->flow.qw1.rxr_bufsize = cpu_to_le16((u16)size);
+ desc->flow.qw1.compl_tag = cpu_to_le16(params->compl_tag);
+}
+
+/**
+ * idpf_tx_maybe_stop_common - 1st level check for common Tx stop conditions
+ * @tx_q: the queue to be checked
+ * @size: number of descriptors we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ */
+int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
+{
+ struct netdev_queue *nq;
+
+ if (likely(IDPF_DESC_UNUSED(tx_q) >= size))
+ return 0;
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+
+ return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
+}
+
+/**
+ * idpf_tx_maybe_stop_splitq - 1st level check for Tx splitq stop conditions
+ * @tx_q: the queue to be checked
+ * @descs_needed: number of descriptors required for this packet
+ *
+ * Returns 0 if stop is not needed
+ */
+static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
+ unsigned int descs_needed)
+{
+ if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
+ goto splitq_stop;
+
+ /* If there are too many outstanding completions expected on the
+ * completion queue, stop the TX queue to give the device some time to
+ * catch up
+ */
+ if (unlikely(IDPF_TX_COMPLQ_PENDING(tx_q->txq_grp) >
+ IDPF_TX_COMPLQ_OVERFLOW_THRESH(tx_q->txq_grp->complq)))
+ goto splitq_stop;
+
+ /* Also check for available bookkeeping buffers; if we are low, stop
+ * the queue to wait for more completions
+ */
+ if (unlikely(IDPF_TX_BUF_RSV_LOW(tx_q)))
+ goto splitq_stop;
+
+ return 0;
+
+splitq_stop:
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_update_end(&tx_q->stats_sync);
+ netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
+
+ return -EBUSY;
+}
+
+/**
+ * idpf_tx_buf_hw_update - Store the new tail value
+ * @tx_q: queue to bump
+ * @val: new tail index
+ * @xmit_more: more skbs pending
+ *
+ * The naming here is special in that 'hw' signals that this function is about
+ * to do a register write to update our queue status. We know this can only
+ * mean tail here as HW should be owning head for TX.
+ */
+void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+ bool xmit_more)
+{
+ struct netdev_queue *nq;
+
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ tx_q->next_to_use = val;
+
+ idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ /* notify HW of packet */
+ if (netif_xmit_stopped(nq) || !xmit_more)
+ writel(val, tx_q->tail);
+}
+
+/**
+ * idpf_tx_desc_count_required - calculate number of Tx descriptors needed
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ *
+ * Returns number of data descriptors needed for this skb.
+ */
+unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+ struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo;
+ unsigned int count = 0, i;
+
+ count += !!skb_headlen(skb);
+
+ if (!skb_is_nonlinear(skb))
+ return count;
+
+ shinfo = skb_shinfo(skb);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ unsigned int size;
+
+ size = skb_frag_size(&shinfo->frags[i]);
+
+ /* We only need to use the idpf_size_to_txd_count check if the
+ * fragment is going to span multiple descriptors,
+ * i.e. size >= 16K.
+ */
+ if (size >= SZ_16K)
+ count += idpf_size_to_txd_count(size);
+ else
+ count++;
+ }
+
+ if (idpf_chk_linearize(skb, txq->tx_max_bufs, count)) {
+ if (__skb_linearize(skb))
+ return 0;
+
+ count = idpf_size_to_txd_count(skb->len);
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.tx.linearize);
+ u64_stats_update_end(&txq->stats_sync);
+ }
+
+ return count;
+}
+
+/**
+ * idpf_tx_dma_map_error - handle TX DMA map errors
+ * @txq: queue to send buffer on
+ * @skb: send buffer
+ * @first: original first buffer info buffer for packet
+ * @idx: starting point on ring to unwind
+ */
+void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 idx)
+{
+ u64_stats_update_begin(&txq->stats_sync);
+ u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
+ u64_stats_update_end(&txq->stats_sync);
+
+ /* clear dma mappings for failed tx_buf map */
+ for (;;) {
+ struct idpf_tx_buf *tx_buf;
+
+ tx_buf = &txq->tx_buf[idx];
+ idpf_tx_buf_rel(txq, tx_buf);
+ if (tx_buf == first)
+ break;
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ if (skb_is_gso(skb)) {
+ union idpf_tx_flex_desc *tx_desc;
+
+ /* If we failed a DMA mapping for a TSO packet, we will have
+ * used one additional descriptor for a context
+ * descriptor. Reset that here.
+ */
+ tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+ memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
+ if (idx == 0)
+ idx = txq->desc_count;
+ idx--;
+ }
+
+ /* Update tail in case netdev_xmit_more was previously true */
+ idpf_tx_buf_hw_update(txq, idx, false);
+}
+
+/**
+ * idpf_tx_splitq_bump_ntu - adjust NTU and generation
+ * @txq: the tx ring to wrap
+ * @ntu: ring index to bump
+ */
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
+{
+ ntu++;
+
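+ /* Wrapping next_to_use advances the completion tag generation so that
+ * tags from successive ring wraps can be told apart.
+ */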
+ if (ntu == txq->desc_count) {
+ ntu = 0;
+ txq->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(txq);
+ }
+
+ return ntu;
+}
+
+/**
+ * idpf_tx_splitq_map - Build the Tx flex descriptor
+ * @tx_q: queue to send buffer on
+ * @params: pointer to splitq params struct
+ * @first: first buffer info buffer to use
+ *
+ * This function loops over the skb data pointed to by *first
+ * and gets a physical address for each memory location and programs
+ * it and the length into the transmit flex descriptor.
+ */
+static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
+ struct idpf_tx_splitq_params *params,
+ struct idpf_tx_buf *first)
+{
+ union idpf_tx_flex_desc *tx_desc;
+ unsigned int data_len, size;
+ struct idpf_tx_buf *tx_buf;
+ u16 i = tx_q->next_to_use;
+ struct netdev_queue *nq;
+ struct sk_buff *skb;
+ skb_frag_t *frag;
+ u16 td_cmd = 0;
+ dma_addr_t dma;
+
+ skb = first->skb;
+
+ td_cmd = params->offload.td_cmd;
+
+ data_len = skb->data_len;
+ size = skb_headlen(skb);
+
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+
+ dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buf = first;
+
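+ /* The completion tag encodes the current generation in the upper bits
+ * and the starting ring index of the packet in the lower (buf id) bits;
+ * the cleaning path uses the low bits to find the buffers again.
+ */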
+ params->compl_tag =
+ (tx_q->compl_tag_cur_gen << tx_q->compl_tag_gen_s) | i;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+
+ if (dma_mapping_error(tx_q->dev, dma))
+ return idpf_tx_dma_map_error(tx_q, skb, first, i);
+
+ tx_buf->compl_tag = params->compl_tag;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
+ /* buf_addr is in same location for both desc types */
+ tx_desc->q.buf_addr = cpu_to_le64(dma);
+
+ /* The stack can send us fragments that are too large for a
+ * single descriptor i.e. frag size > 16K-1. We will need to
+ * split the fragment across multiple descriptors in this case.
+ * To adhere to HW alignment restrictions, the fragment needs
+ * to be split such that the first chunk ends on a 4K boundary
+ * and all subsequent chunks start on a 4K boundary. We still
+ * want to send as much data as possible though, so our
+ * intermediate descriptor chunk size will be 12K.
+ *
+ * For example, consider a 32K fragment mapped to DMA addr 2600.
+ * ------------------------------------------------------------
+ * | frag_size = 32K |
+ * ------------------------------------------------------------
+ * |2600 |16384 |28672
+ *
+ * 3 descriptors will be used for this fragment. The HW expects
+ * the descriptors to contain the following:
+ * ------------------------------------------------------------
+ * | size = 13784 | size = 12K | size = 6696 |
+ * | dma = 2600 | dma = 16384 | dma = 28672 |
+ * ------------------------------------------------------------
+ *
+ * We need to first adjust the max_data for the first chunk so
+ * that it ends on a 4K boundary. By negating the value of the
+ * DMA address and taking only the low order bits, we're
+ * effectively calculating
+ * 4K - (DMA addr lower order bits) =
+ * bytes to next boundary.
+ *
+ * Add that to our base aligned max_data (12K) and we have
+ * our first chunk size. In the example above,
+ * 13784 = 12K + (4096-2600)
+ *
+ * After guaranteeing the first chunk ends on a 4K boundary, we
+ * will give the intermediate descriptors 12K chunks and
+ * whatever is left to the final descriptor. This ensures that
+ * all descriptors used for the remaining chunks of the
+ * fragment start on a 4K boundary and we use as few
+ * descriptors as possible.
+ */
+ max_data += -dma & (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+ while (unlikely(size > IDPF_TX_MAX_DESC_DATA)) {
+ idpf_tx_splitq_build_desc(tx_desc, params, td_cmd,
+ max_data);
+
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ i = 0;
+ tx_q->compl_tag_cur_gen =
+ IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ }
+
+ /* Since this packet has a buffer that is going to span
+ * multiple descriptors, it's going to leave holes in
+ * the TX buffer ring. To ensure these holes do not
+ * cause issues in the cleaning routines, we will clear
+ * them of any stale data and assign them the same
+ * completion tag as the current packet. Then when the
+ * packet is being cleaned, the cleaning routines will
+ * simply pass over these holes and finish cleaning the
+ * rest of the packet.
+ */
+ memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
+ tx_q->tx_buf[i].compl_tag = params->compl_tag;
+
+ /* Adjust the DMA offset and the remaining size of the
+ * fragment. On the first iteration of this loop,
+ * max_data will be >= 12K and <= 16K-1. On any
+ * subsequent iteration of this loop, max_data will
+ * always be 12K.
+ */
+ dma += max_data;
+ size -= max_data;
+
+ /* Reset max_data since remaining chunks will be 12K
+ * at most
+ */
+ max_data = IDPF_TX_MAX_DESC_DATA_ALIGNED;
+
+ /* buf_addr is in same location for both desc types */
+ tx_desc->q.buf_addr = cpu_to_le64(dma);
+ }
+
+ if (!data_len)
+ break;
+
+ idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
+ tx_desc++;
+ i++;
+
+ if (i == tx_q->desc_count) {
+ tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ i = 0;
+ tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
+ }
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_q->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buf = &tx_q->tx_buf[i];
+ }
+
+ /* record SW timestamp if HW timestamp is not available */
+ skb_tx_timestamp(skb);
+
+ /* write last descriptor with RS and EOP bits */
+ td_cmd |= params->eop_cmd;
+ idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
+ i = idpf_tx_splitq_bump_ntu(tx_q, i);
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ tx_q->txq_grp->num_completions_pending++;
+
+ /* record bytecount for BQL */
+ nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ netdev_tx_sent_queue(nq, first->bytecount);
+
+ idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
+}
+
+/**
+ * idpf_tso - computes mss and TSO length to prepare for TSO
+ * @skb: pointer to skb
+ * @off: pointer to struct that holds offload parameters
+ *
+ * Returns error (negative) if TSO was requested but cannot be applied to the
+ * given skb, 0 if TSO does not apply to the given skb, or 1 otherwise.
+ */
+int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_start;
+ int err;
+
+ if (!shinfo->gso_size)
+ return 0;
+
+ err = skb_cow_head(skb, 0);
+ if (err < 0)
+ return err;
+
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ ip.v4->tot_len = 0;
+ ip.v4->check = 0;
+ } else if (ip.v6->version == 6) {
+ ip.v6->payload_len = 0;
+ }
+
+ l4_start = skb_transport_offset(skb);
+
+ /* remove payload length from checksum */
+ paylen = skb->len - l4_start;
+
+ switch (shinfo->gso_type & ~SKB_GSO_DODGY) {
+ case SKB_GSO_TCPV4:
+ case SKB_GSO_TCPV6:
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(paylen));
+ off->tso_hdr_len = __tcp_hdrlen(l4.tcp) + l4_start;
+ break;
+ case SKB_GSO_UDP_L4:
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(paylen));
+ /* compute length of segmentation header */
+ off->tso_hdr_len = sizeof(struct udphdr) + l4_start;
+ l4.udp->len = htons(shinfo->gso_size + sizeof(struct udphdr));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ off->tso_len = skb->len - off->tso_hdr_len;
+ off->mss = shinfo->gso_size;
+ off->tso_segs = shinfo->gso_segs;
+
+ off->tx_flags |= IDPF_TX_FLAGS_TSO;
+
+ return 1;
+}
+
+/**
+ * __idpf_chk_linearize - Check skb is not using too many buffers
+ * @skb: send buffer
+ * @max_bufs: maximum number of buffers
+ *
+ * For TSO we need to count the TSO header and segment payload separately. As
+ * such we need to check cases where we have max_bufs-1 fragments or more as we
+ * can potentially require max_bufs+1 DMA transactions, 1 for the TSO header, 1
+ * for the segment payload in the first descriptor, and another max_bufs-1 for
+ * the fragments.
+ */
+static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+ const skb_frag_t *frag, *stale;
+ int nr_frags, sum;
+
+ /* no need to check if number of frags is less than max_bufs - 1 */
+ nr_frags = shinfo->nr_frags;
+ if (nr_frags < (max_bufs - 1))
+ return false;
+
+ /* We need to walk through the list and validate that each group
+ * of max_bufs-2 fragments totals at least gso_size.
+ */
+ nr_frags -= max_bufs - 2;
+ frag = &shinfo->frags[0];
+
+ /* Initialize sum to one minus gso_size. We use
+ * this as the worst case scenario in which the frag ahead of us only
+ * provides one byte which is why we are limited to max_bufs-2
+ * descriptors for a single transmit as the header and previous
+ * fragment are already consuming 2 descriptors.
+ */
+ sum = 1 - shinfo->gso_size;
+
+ /* Add size of frags 0 through 4 to create our initial sum */
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+ sum += skb_frag_size(frag++);
+
+ /* Walk through fragments adding latest fragment, testing it, and
+ * then removing stale fragments from the sum.
+ */
+ for (stale = &shinfo->frags[0];; stale++) {
+ int stale_size = skb_frag_size(stale);
+
+ sum += skb_frag_size(frag++);
+
+ /* The stale fragment may present us with a smaller
+ * descriptor than the actual fragment size. To account
+ * for that we need to remove all the data on the front and
+ * figure out what the remainder would be in the last
+ * descriptor associated with the fragment.
+ */
+ if (stale_size > IDPF_TX_MAX_DESC_DATA) {
+ int align_pad = -(skb_frag_off(stale)) &
+ (IDPF_TX_MAX_READ_REQ_SIZE - 1);
+
+ sum -= align_pad;
+ stale_size -= align_pad;
+
+ do {
+ sum -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ stale_size -= IDPF_TX_MAX_DESC_DATA_ALIGNED;
+ } while (stale_size > IDPF_TX_MAX_DESC_DATA);
+ }
+
+ /* if sum is negative we failed to make sufficient progress */
+ if (sum < 0)
+ return true;
+
+ if (!nr_frags--)
+ break;
+
+ sum -= stale_size;
+ }
+
+ return false;
+}
+
+/**
+ * idpf_chk_linearize - Check if skb exceeds max descriptors per packet
+ * @skb: send buffer
+ * @max_bufs: maximum scatter gather buffers for single packet
+ * @count: number of buffers this packet needs
+ *
+ * Make sure we don't exceed maximum scatter gather buffers for a single
+ * packet. We have to do some special checking around the boundary (max_bufs-1)
+ * if TSO is on since we need to count the TSO header and payload separately.
+ * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
+ * header, 1 for segment payload, and then 7 for the fragments.
+ */
+bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count)
+{
+ if (likely(count < max_bufs))
+ return false;
+ if (skb_is_gso(skb))
+ return __idpf_chk_linearize(skb, max_bufs);
+
+ return count > max_bufs;
+}
+
+/**
+ * idpf_tx_splitq_get_ctx_desc - grab next desc and update buffer ring
+ * @txq: queue to put context descriptor on
+ *
+ * Since the TX buffer ring mimics the descriptor ring, update the tx buffer
+ * ring entry to reflect that this index is a context descriptor
+ */
+static struct idpf_flex_tx_ctx_desc *
+idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
+{
+ struct idpf_flex_tx_ctx_desc *desc;
+ int i = txq->next_to_use;
+
+ memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
+ txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+
+ /* grab the next descriptor */
+ desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+ txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
+
+ return desc;
+}
+
+/**
+ * idpf_tx_drop_skb - free the SKB and bump tail if necessary
+ * @tx_q: queue to send buffer on
+ * @skb: pointer to skb
+ */
+netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
+{
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
+ u64_stats_update_end(&tx_q->stats_sync);
+
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * idpf_tx_splitq_frame - Sends buffer on Tx ring using flex descriptors
+ * @skb: send buffer
+ * @tx_q: queue to send buffer on
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
+ struct idpf_queue *tx_q)
+{
+ struct idpf_tx_splitq_params tx_params = { };
+ struct idpf_tx_buf *first;
+ unsigned int count;
+ int tso;
+
+ count = idpf_tx_desc_count_required(tx_q, skb);
+ if (unlikely(!count))
+ return idpf_tx_drop_skb(tx_q, skb);
+
+ tso = idpf_tso(skb, &tx_params.offload);
+ if (unlikely(tso < 0))
+ return idpf_tx_drop_skb(tx_q, skb);
+
+ /* Check for splitq specific TX resources */
+ count += (IDPF_TX_DESCS_PER_CACHE_LINE + tso);
+ if (idpf_tx_maybe_stop_splitq(tx_q, count)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ if (tso) {
+ /* If tso is needed, set up context desc */
+ struct idpf_flex_tx_ctx_desc *ctx_desc =
+ idpf_tx_splitq_get_ctx_desc(tx_q);
+
+ ctx_desc->tso.qw1.cmd_dtype =
+ cpu_to_le16(IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+ IDPF_TX_FLEX_CTX_DESC_CMD_TSO);
+ ctx_desc->tso.qw0.flex_tlen =
+ cpu_to_le32(tx_params.offload.tso_len &
+ IDPF_TXD_FLEX_CTX_TLEN_M);
+ ctx_desc->tso.qw0.mss_rt =
+ cpu_to_le16(tx_params.offload.mss &
+ IDPF_TXD_FLEX_CTX_MSS_RT_M);
+ ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
+
+ u64_stats_update_begin(&tx_q->stats_sync);
+ u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
+ u64_stats_update_end(&tx_q->stats_sync);
+ }
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_q->tx_buf[tx_q->next_to_use];
+ first->skb = skb;
+
+ if (tso) {
+ first->gso_segs = tx_params.offload.tso_segs;
+ first->bytecount = skb->len +
+ ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ } else {
+ first->gso_segs = 1;
+ first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ }
+
+ if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
+ tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
+ tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
+ /* Set the RE bit to catch any packets that may have not been
+ * stashed during RS completion cleaning. MIN_GAP is set to
+ * MIN_RING size to ensure it will be set at least once each
+ * time around the ring.
+ */
+ if (!(tx_q->next_to_use % IDPF_TX_SPLITQ_RE_MIN_GAP)) {
+ tx_params.eop_cmd |= IDPF_TXD_FLEX_FLOW_CMD_RE;
+ tx_q->txq_grp->num_completions_pending++;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_params.offload.td_cmd |= IDPF_TXD_FLEX_FLOW_CMD_CS_EN;
+
+ } else {
+ tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2;
+ tx_params.eop_cmd = IDPF_TXD_LAST_DESC_CMD;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ tx_params.offload.td_cmd |= IDPF_TX_FLEX_DESC_CMD_CS_EN;
+ }
+
+ idpf_tx_splitq_map(tx_q, &tx_params, first);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
+ struct idpf_queue *tx_q;
+
+ if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+ }
+
+ tx_q = vport->txqs[skb_get_queue_mapping(skb)];
+
+ /* hardware can't handle really short frames, hardware padding works
+ * beyond this point
+ */
+ if (skb_put_padto(skb, tx_q->tx_min_pkt_len)) {
+ idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
+
+ return NETDEV_TX_OK;
+ }
+
+ return idpf_tx_splitq_frame(skb, tx_q);
+}
+
+/**
+ * idpf_ptype_to_htype - get a hash type
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * Returns the appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be
+ * used by skb_set_hash, based on the PTYPE as parsed by the HW Rx pipeline
+ * and carried in the Rx descriptor.
+ */
+enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
+{
+ if (!decoded->known)
+ return PKT_HASH_TYPE_NONE;
+ if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
+ decoded->inner_prot)
+ return PKT_HASH_TYPE_L4;
+ if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
+ decoded->outer_ip)
+ return PKT_HASH_TYPE_L3;
+ if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
+ return PKT_HASH_TYPE_L2;
+
+ return PKT_HASH_TYPE_NONE;
+}
+
+/**
+ * idpf_rx_hash - set the hash value in the skb
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @rx_desc: Receive descriptor
+ * @decoded: Decoded Rx packet type related fields
+ */
+static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ u32 hash;
+
+ if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
+ return;
+
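+ /* Assemble the 32-bit hash from the three hash fields in the descriptor */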
+ hash = le16_to_cpu(rx_desc->hash1) |
+ (rx_desc->ff2_mirrid_hash2.hash2 << 16) |
+ (rx_desc->hash3 << 24);
+
+ skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+}
+
+/**
+ * idpf_rx_csum - Indicate in skb if checksum is good
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @csum_bits: checksum fields extracted from the descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * skb->protocol must be set before this function is called
+ */
+static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded *csum_bits,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ bool ipv4, ipv6;
+
+ /* check if Rx checksum is enabled */
+ if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
+ return;
+
+ /* check if HW has decoded the packet and checksum */
+ if (!(csum_bits->l3l4p))
+ return;
+
+ ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+
+ if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
+ goto checksum_fail;
+
+ if (ipv6 && csum_bits->ipv6exadd)
+ return;
+
+ /* check for L4 errors and handle packets that were not able to be
+ * checksummed
+ */
+ if (csum_bits->l4e)
+ goto checksum_fail;
+
+ /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
+ switch (decoded->inner_prot) {
+ case IDPF_RX_PTYPE_INNER_PROT_ICMP:
+ case IDPF_RX_PTYPE_INNER_PROT_TCP:
+ case IDPF_RX_PTYPE_INNER_PROT_UDP:
+ if (!csum_bits->raw_csum_inv) {
+ u16 csum = csum_bits->raw_csum;
+
+ skb->csum = csum_unfold((__force __sum16)~swab16(csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ } else {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ break;
+ case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ break;
+ }
+
+ return;
+
+checksum_fail:
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_update_end(&rxq->stats_sync);
+}
+
+/**
+ * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
+ * @rx_desc: receive descriptor
+ * @csum: structure to extract checksum fields into
+ */
+static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct idpf_rx_csum_decoded *csum)
+{
+ u8 qword0, qword1;
+
+ qword0 = rx_desc->status_err0_qw0;
+ qword1 = rx_desc->status_err0_qw1;
+
+ csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ qword1);
+ csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
+ qword1);
+ csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
+ qword1);
+ csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
+ qword1);
+ csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
+ qword0);
+ csum->raw_csum_inv = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M,
+ le16_to_cpu(rx_desc->ptype_err_fflags0));
+ csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+}
+
+/**
+ * idpf_rx_rsc - Set the RSC fields in the skb
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @rx_desc: Receive descriptor
+ * @decoded: Decoded Rx packet type related fields
+ *
+ * Return 0 on success and error code on failure
+ *
+ * Populate the skb fields with the total number of RSC segments, RSC payload
+ * length and packet type.
+ */
+static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct idpf_rx_ptype_decoded *decoded)
+{
+ u16 rsc_segments, rsc_seg_len;
+ bool ipv4, ipv6;
+ int len;
+
+ if (unlikely(!decoded->outer_ip))
+ return -EINVAL;
+
+ rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
+ if (unlikely(!rsc_seg_len))
+ return -EINVAL;
+
+ ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
+ ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+
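+ /* The packet must be exactly one of IPv4 or IPv6 */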
+ if (unlikely(!(ipv4 ^ ipv6)))
+ return -EINVAL;
+
+ rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
+ if (unlikely(rsc_segments == 1))
+ return 0;
+
+ NAPI_GRO_CB(skb)->count = rsc_segments;
+ skb_shinfo(skb)->gso_size = rsc_seg_len;
+
+ skb_reset_network_header(skb);
+ len = skb->len - skb_transport_offset(skb);
+
+ if (ipv4) {
+ struct iphdr *ipv4h = ip_hdr(skb);
+
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ /* Reset and set transport header offset in skb */
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+
+ /* Compute the TCP pseudo header checksum */
+ tcp_hdr(skb)->check =
+ ~tcp_v4_check(len, ipv4h->saddr, ipv4h->daddr, 0);
+ } else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ tcp_hdr(skb)->check =
+ ~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
+ }
+
+ tcp_gro_complete(skb);
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rxq: Rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being populated
+ * @rx_desc: Receive descriptor
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, protocol, and
+ * other fields within the skb.
+ */
+static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
+ struct sk_buff *skb,
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+ struct idpf_rx_csum_decoded csum_bits = { };
+ struct idpf_rx_ptype_decoded decoded;
+ u16 rx_ptype;
+
+ rx_ptype = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M,
+ le16_to_cpu(rx_desc->ptype_err_fflags0));
+
+ decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
+ /* If we don't know the ptype we can't do anything else with it. Just
+ * pass it up the stack as-is.
+ */
+ if (!decoded.known)
+ return 0;
+
+ /* process RSS/hash */
+ idpf_rx_hash(rxq, skb, rx_desc, &decoded);
+
+ skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
+
+ if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M,
+ le16_to_cpu(rx_desc->hdrlen_flags)))
+ return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
+
+ idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
+ idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_add_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_buf: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ * The function will then update the page offset.
+ */
+void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size)
+{
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->page_offset, size, rx_buf->truesize);
+
+ rx_buf->page = NULL;
+}
+
+/**
+ * idpf_rx_construct_skb - Allocate skb and populate it
+ * @rxq: Rx descriptor queue
+ * @rx_buf: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
+ struct idpf_rx_buf *rx_buf,
+ unsigned int size)
+{
+ unsigned int headlen;
+ struct sk_buff *skb;
+ void *va;
+
+ va = page_address(rx_buf->page) + rx_buf->page_offset;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(va);
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
+ GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ idpf_rx_put_page(rx_buf);
+
+ return NULL;
+ }
+
+ skb_record_rx_queue(skb, rxq->idx);
+ skb_mark_for_recycle(skb);
+
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IDPF_RX_HDR_SIZE)
+ headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* if we exhaust the linear part then add what is left as a frag */
+ size -= headlen;
+ if (!size) {
+ idpf_rx_put_page(rx_buf);
+
+ return skb;
+ }
+
+ skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
+ size, rx_buf->truesize);
+
+ /* Since we're giving the page to the stack, clear our reference to it.
+ * We'll get a new one during buffer posting.
+ */
+ rx_buf->page = NULL;
+
+ return skb;
+}
+
+/**
+ * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
+ * @rxq: Rx descriptor queue
+ * @va: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page data from
+ * the current receive descriptor, taking care to set up the skb correctly.
+ * This specifically uses a header buffer to start building the skb.
+ */
+static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
+ const void *va,
+ unsigned int size)
+{
+ struct sk_buff *skb;
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_record_rx_queue(skb, rxq->idx);
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* More than likely, a payload fragment, which will use a page from
+ * page_pool, will be added to the SKB, so mark it for recycle
+ * preemptively. And if not, it's inconsequential.
+ */
+ skb_mark_for_recycle(skb);
+
+ return skb;
+}
+
+/**
+ * idpf_rx_splitq_test_staterr - tests bits in Rx descriptor
+ * status and error fields
+ * @stat_err_field: field from descriptor to test bits in
+ * @stat_err_bits: value to mask
+ *
+ */
+static bool idpf_rx_splitq_test_staterr(const u8 stat_err_field,
+ const u8 stat_err_bits)
+{
+ return !!(stat_err_field & stat_err_bits);
+}
+
+/**
+ * idpf_rx_splitq_is_eop - process handling of EOP buffers
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function exits returning true,
+ * otherwise it returns false, indicating that this is in fact a non-EOP buffer.
+ */
+static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+ return likely(idpf_rx_splitq_test_staterr(rx_desc->status_err0_qw1,
+ IDPF_RXD_EOF_SPLITQ));
+}
+
+/**
+ * idpf_rx_splitq_clean - Clean completed descriptors from Rx queue
+ * @rxq: Rx descriptor queue to retrieve receive buffer queue
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ */
+static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
+{
+ int total_rx_bytes = 0, total_rx_pkts = 0;
+ struct idpf_queue *rx_bufq = NULL;
+ struct sk_buff *skb = rxq->skb;
+ u16 ntc = rxq->next_to_clean;
+
+ /* Process Rx packets bounded by budget */
+ while (likely(total_rx_pkts < budget)) {
+ struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct idpf_sw_queue *refillq = NULL;
+ struct idpf_rxq_set *rxq_set = NULL;
+ struct idpf_rx_buf *rx_buf = NULL;
+ union virtchnl2_rx_desc *desc;
+ unsigned int pkt_len = 0;
+ unsigned int hdr_len = 0;
+ u16 gen_id, buf_id = 0;
+ /* Header buffer overflow only valid for header split */
+ bool hbo = false;
+ int bufq_id;
+ u8 rxdid;
+
+ /* get the Rx desc from Rx queue based on 'next_to_clean' */
+ desc = IDPF_RX_DESC(rxq, ntc);
+ rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc
+ */
+ dma_rmb();
+
+ /* if the descriptor isn't done, no work yet to do */
+ gen_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ gen_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M, gen_id);
+
+ if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
+ break;
+
+ rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
+ rx_desc->rxdid_ucast);
+ if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
+ IDPF_RX_BUMP_NTC(rxq, ntc);
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.bad_descs);
+ u64_stats_update_end(&rxq->stats_sync);
+ continue;
+ }
+
+ pkt_len = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
+ pkt_len);
+
+ hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
+ rx_desc->status_err0_qw1);
+
+ if (unlikely(hbo)) {
+ /* If a header buffer overflow occurs, i.e. the header is
+ * too large to fit in the header split buffer, HW will
+ * put the entire packet, including headers, in the
+ * data/payload buffer.
+ */
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
+ u64_stats_update_end(&rxq->stats_sync);
+ goto bypass_hsplit;
+ }
+
+ hdr_len = le16_to_cpu(rx_desc->hdrlen_flags);
+ hdr_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M,
+ hdr_len);
+
+bypass_hsplit:
+ bufq_id = le16_to_cpu(rx_desc->pktlen_gen_bufq_id);
+ bufq_id = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M,
+ bufq_id);
+
+ rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
+ if (!bufq_id)
+ refillq = rxq_set->refillq0;
+ else
+ refillq = rxq_set->refillq1;
+
+ /* retrieve buffer from the rxq */
+ rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
+
+ buf_id = le16_to_cpu(rx_desc->buf_id);
+
+ rx_buf = &rx_bufq->rx_buf.buf[buf_id];
+
+ if (hdr_len) {
+ const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
+ (u32)buf_id * IDPF_HDR_BUF_SIZE;
+
+ skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
+ }
+
+ if (pkt_len) {
+ idpf_rx_sync_for_cpu(rx_buf, pkt_len);
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, pkt_len);
+ else
+ skb = idpf_rx_construct_skb(rxq, rx_buf,
+ pkt_len);
+ } else {
+ idpf_rx_put_page(rx_buf);
+ }
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
+
+ idpf_rx_post_buf_refill(refillq, buf_id);
+
+ IDPF_RX_BUMP_NTC(rxq, ntc);
+ /* skip if it is non EOP desc */
+ if (!idpf_rx_splitq_is_eop(rx_desc))
+ continue;
+
+ /* pad skb if needed (to make valid ethernet frame) */
+ if (eth_skb_pad(skb)) {
+ skb = NULL;
+ continue;
+ }
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ /* protocol */
+ if (unlikely(idpf_rx_process_skb_fields(rxq, skb, rx_desc))) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ continue;
+ }
+
+ /* send completed skb up the stack */
+ napi_gro_receive(&rxq->q_vector->napi, skb);
+ skb = NULL;
+
+ /* update budget accounting */
+ total_rx_pkts++;
+ }
+
+ rxq->next_to_clean = ntc;
+
+ rxq->skb = skb;
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
+ u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_update_end(&rxq->stats_sync);
+
+ /* guarantee a trip back through this routine if there was a failure */
+ return total_rx_pkts;
+}
+
+/**
+ * idpf_rx_update_bufq_desc - Update buffer queue descriptor
+ * @bufq: Pointer to the buffer queue
+ * @refill_desc: SW Refill queue descriptor containing buffer ID
+ * @buf_desc: Buffer queue descriptor
+ *
+ * Return 0 on success and negative on failure.
+ */
+static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
+ struct virtchnl2_splitq_rx_buf_desc *buf_desc)
+{
+ struct idpf_rx_buf *buf;
+ dma_addr_t addr;
+ u16 buf_id;
+
+ buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+
+ buf = &bufq->rx_buf.buf[buf_id];
+
+ addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
+ if (unlikely(addr == DMA_MAPPING_ERROR))
+ return -ENOMEM;
+
+ buf_desc->pkt_addr = cpu_to_le64(addr);
+ buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
+
+ if (!bufq->rx_hsplit_en)
+ return 0;
+
+ buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
+ (u32)buf_id * IDPF_HDR_BUF_SIZE);
+
+ return 0;
+}
+
+/**
+ * idpf_rx_clean_refillq - Clean refill queue buffers
+ * @bufq: buffer queue to post buffers back to
+ * @refillq: refill queue to clean
+ *
+ * This function takes care of the buffer refill management
+ */
+static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
+ struct idpf_sw_queue *refillq)
+{
+ struct virtchnl2_splitq_rx_buf_desc *buf_desc;
+ u16 bufq_nta = bufq->next_to_alloc;
+ u16 ntc = refillq->next_to_clean;
+ int cleaned = 0;
+ u16 gen;
+
+ buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+
+ /* make sure we stop at ring wrap in the unlikely case ring is full */
+ while (likely(cleaned < refillq->desc_count)) {
+ u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+ bool failure;
+
+ gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
+ if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
+ break;
+
+ failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
+ buf_desc);
+ if (failure)
+ break;
+
+ if (unlikely(++ntc == refillq->desc_count)) {
+ change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ ntc = 0;
+ }
+
+ if (unlikely(++bufq_nta == bufq->desc_count)) {
+ buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+ bufq_nta = 0;
+ } else {
+ buf_desc++;
+ }
+
+ cleaned++;
+ }
+
+ if (!cleaned)
+ return;
+
+ /* We want to limit how many transactions on the bus we trigger with
+ * tail writes so we only do it in strides. It's also important we
+ * align the write to a multiple of 8 as required by HW.
+ */
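+ /* Illustrative example with hypothetical values: if next_to_use is 4
+ * and bufq_nta has advanced to 37, 33 buffers are pending, which is
+ * past the IDPF_RX_BUF_POST_STRIDE (16) threshold, so tail is written
+ * at ALIGN_DOWN(37, 16) == 32 and the remaining 5 buffers wait for the
+ * next stride.
+ */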
+ if (((bufq->next_to_use <= bufq_nta ? 0 : bufq->desc_count) +
+ bufq_nta - bufq->next_to_use) >= IDPF_RX_BUF_POST_STRIDE)
+ idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq_nta,
+ IDPF_RX_BUF_POST_STRIDE));
+
+ /* update next to alloc since we have filled the ring */
+ refillq->next_to_clean = ntc;
+ bufq->next_to_alloc = bufq_nta;
+}
+
+/**
+ * idpf_rx_clean_refillq_all - Clean all refill queues
+ * @bufq: buffer queue with refill queues
+ *
+ * Iterates through all refill queues assigned to this buffer queue and cleans
+ * each of them.
+ */
+static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
+{
+ struct idpf_bufq_set *bufq_set;
+ int i;
+
+ bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
+ for (i = 0; i < bufq_set->num_refillqs; i++)
+ idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
+}
+
+/**
+ * idpf_vport_intr_clean_queues - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ *
+ */
+static irqreturn_t idpf_vport_intr_clean_queues(int __always_unused irq,
+ void *data)
+{
+ struct idpf_q_vector *q_vector = (struct idpf_q_vector *)data;
+
+ q_vector->total_events++;
+ napi_schedule(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
+ * @vport: virtual port structure
+ *
+ */
+static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport)
+{
+ u16 v_idx;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
+ netif_napi_del(&vport->q_vectors[v_idx].napi);
+}
+
+/**
+ * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
+{
+ int v_idx;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++)
+ napi_disable(&vport->q_vectors[v_idx].napi);
+}
+
+/**
+ * idpf_vport_intr_rel - Free memory allocated for interrupt vectors
+ * @vport: virtual port
+ *
+ * Free the memory allocated for interrupt vectors associated to a vport
+ */
+void idpf_vport_intr_rel(struct idpf_vport *vport)
+{
+ int i, j, v_idx;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+
+ kfree(q_vector->bufq);
+ q_vector->bufq = NULL;
+ kfree(q_vector->tx);
+ q_vector->tx = NULL;
+ kfree(q_vector->rx);
+ q_vector->rx = NULL;
+ }
+
+ /* Clean up the mapping of queues to vectors */
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
+ rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
+ else
+ for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
+ rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ for (i = 0; i < vport->num_txq_grp; i++)
+ vport->txq_grps[i].complq->q_vector = NULL;
+ else
+ for (i = 0; i < vport->num_txq_grp; i++)
+ for (j = 0; j < vport->txq_grps[i].num_txq; j++)
+ vport->txq_grps[i].txqs[j]->q_vector = NULL;
+
+ kfree(vport->q_vectors);
+ vport->q_vectors = NULL;
+}
+
+/**
+ * idpf_vport_intr_rel_irq - Free the IRQ association with the OS
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int vector;
+
+ for (vector = 0; vector < vport->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ int irq_num, vidx;
+
+ /* free only the irqs that were actually requested */
+ if (!q_vector)
+ continue;
+
+ vidx = vport->q_vector_idxs[vector];
+ irq_num = adapter->msix_entries[vidx].vector;
+
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(irq_num, NULL);
+ free_irq(irq_num, q_vector);
+ }
+}
+
+/**
+ * idpf_vport_intr_dis_irq_all - Disable all interrupts
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport)
+{
+ struct idpf_q_vector *q_vector = vport->q_vectors;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++)
+ writel(0, q_vector[q_idx].intr_reg.dyn_ctl);
+}
+
+/**
+ * idpf_vport_intr_buildreg_itr - Build the value to write to the DYN_CTL register
+ * @q_vector: pointer to q_vector
+ * @type: itr index
+ * @itr: itr value
+ */
+static u32 idpf_vport_intr_buildreg_itr(struct idpf_q_vector *q_vector,
+ const int type, u16 itr)
+{
+ u32 itr_val;
+
+ itr &= IDPF_ITR_MASK;
+ /* Don't clear PBA because that can cause lost interrupts that
+ * came in while we were cleaning/polling
+ */
+ itr_val = q_vector->intr_reg.dyn_ctl_intena_m |
+ (type << q_vector->intr_reg.dyn_ctl_itridx_s) |
+ (itr << (q_vector->intr_reg.dyn_ctl_intrvl_s - 1));
+
+ return itr_val;
+}
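+
+/* Illustrative example: when called from idpf_vport_intr_update_itr_ena_irq()
+ * with type == IDPF_NO_ITR_UPDATE_IDX (3) and itr == 0, the returned value is
+ * just dyn_ctl_intena_m with ITR index 3 selected, so the interrupt is
+ * re-enabled without touching any throttling interval.
+ */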
+
+/**
+ * idpf_update_dim_sample - Update dim sample with packets and bytes
+ * @q_vector: the vector associated with the interrupt
+ * @dim_sample: dim sample to update
+ * @dim: dim instance structure
+ * @packets: total packets
+ * @bytes: total bytes
+ *
+ * Update the dim sample with the packets and bytes which are passed to this
+ * function. Set the dim state appropriately if the dim settings gets stale.
+ */
+static void idpf_update_dim_sample(struct idpf_q_vector *q_vector,
+ struct dim_sample *dim_sample,
+ struct dim *dim, u64 packets, u64 bytes)
+{
+ dim_update_sample(q_vector->total_events, packets, bytes, dim_sample);
+ dim_sample->comp_ctr = 0;
+
+ /* if dim settings get stale, like when not updated for 1 second or
+ * longer, force it to start again. This addresses the frequent case
+ * of an idle queue being switched to by the scheduler.
+ */
+ if (ktime_ms_delta(dim_sample->time, dim->start_sample.time) >= HZ)
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * idpf_net_dim - Update net DIM algorithm
+ * @q_vector: the vector associated with the interrupt
+ *
+ * Create a DIM sample and notify net_dim() so that it can possibly decide
+ * a new ITR value based on incoming packets, bytes, and interrupts.
+ *
+ * This function is a no-op if the queue is not configured to dynamic ITR.
+ */
+static void idpf_net_dim(struct idpf_q_vector *q_vector)
+{
+ struct dim_sample dim_sample = { };
+ u64 packets, bytes;
+ u32 i;
+
+ if (!IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode))
+ goto check_rx_itr;
+
+ for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
+ struct idpf_queue *txq = q_vector->tx[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&txq->stats_sync);
+ packets += u64_stats_read(&txq->q_stats.tx.packets);
+ bytes += u64_stats_read(&txq->q_stats.tx.bytes);
+ } while (u64_stats_fetch_retry(&txq->stats_sync, start));
+ }
+
+ idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->tx_dim,
+ packets, bytes);
+ net_dim(&q_vector->tx_dim, dim_sample);
+
+check_rx_itr:
+ if (!IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode))
+ return;
+
+ for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
+ struct idpf_queue *rxq = q_vector->rx[i];
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&rxq->stats_sync);
+ packets += u64_stats_read(&rxq->q_stats.rx.packets);
+ bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
+ } while (u64_stats_fetch_retry(&rxq->stats_sync, start));
+ }
+
+ idpf_update_dim_sample(q_vector, &dim_sample, &q_vector->rx_dim,
+ packets, bytes);
+ net_dim(&q_vector->rx_dim, dim_sample);
+}
+
+/**
+ * idpf_vport_intr_update_itr_ena_irq - Update itr and re-enable MSIX interrupt
+ * @q_vector: q_vector for which itr is being updated and interrupt enabled
+ *
+ * Update the net_dim() algorithm and re-enable the interrupt associated with
+ * this vector.
+ */
+void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
+{
+ u32 intval;
+
+ /* net_dim() updates ITR out-of-band using a work item */
+ idpf_net_dim(q_vector);
+
+ intval = idpf_vport_intr_buildreg_itr(q_vector,
+ IDPF_NO_ITR_UPDATE_IDX, 0);
+
+ writel(intval, q_vector->intr_reg.dyn_ctl);
+}
+
+/**
+ * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
+ * @vport: main vport structure
+ * @basename: name for the vector
+ */
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int vector, err, irq_num, vidx;
+ const char *vec_name;
+
+ for (vector = 0; vector < vport->num_q_vectors; vector++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+
+ vidx = vport->q_vector_idxs[vector];
+ irq_num = adapter->msix_entries[vidx].vector;
+
+ if (q_vector->num_rxq && q_vector->num_txq)
+ vec_name = "TxRx";
+ else if (q_vector->num_rxq)
+ vec_name = "Rx";
+ else if (q_vector->num_txq)
+ vec_name = "Tx";
+ else
+ continue;
+
+ q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ basename, vec_name, vidx);
+
+ err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ netdev_err(vport->netdev,
+ "Request_irq failed, error: %d\n", err);
+ goto free_q_irqs;
+ }
+ /* assign the mask for this irq */
+ irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ }
+
+ return 0;
+
+free_q_irqs:
+ while (--vector >= 0) {
+ vidx = vport->q_vector_idxs[vector];
+ irq_num = adapter->msix_entries[vidx].vector;
+ free_irq(irq_num, &vport->q_vectors[vector]);
+ }
+
+ return err;
+}
+
+/**
+ * idpf_vport_intr_write_itr - Write ITR value to the ITR register
+ * @q_vector: q_vector structure
+ * @itr: Interrupt throttling rate
+ * @tx: Tx or Rx ITR
+ */
+void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx)
+{
+ struct idpf_intr_reg *intr_reg;
+
+ if (tx && !q_vector->tx)
+ return;
+ else if (!tx && !q_vector->rx)
+ return;
+
+ intr_reg = &q_vector->intr_reg;
+ writel(ITR_REG_ALIGN(itr) >> IDPF_ITR_GRAN_S,
+ tx ? intr_reg->tx_itr : intr_reg->rx_itr);
+}
+
+/**
+ * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
+{
+ bool dynamic;
+ int q_idx;
+ u16 itr;
+
+ for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *qv = &vport->q_vectors[q_idx];
+
+ /* Set the initial ITR values */
+ if (qv->num_txq) {
+ dynamic = IDPF_ITR_IS_DYNAMIC(qv->tx_intr_mode);
+ itr = vport->tx_itr_profile[qv->tx_dim.profile_ix];
+ idpf_vport_intr_write_itr(qv, dynamic ?
+ itr : qv->tx_itr_value,
+ true);
+ }
+
+ if (qv->num_rxq) {
+ dynamic = IDPF_ITR_IS_DYNAMIC(qv->rx_intr_mode);
+ itr = vport->rx_itr_profile[qv->rx_dim.profile_ix];
+ idpf_vport_intr_write_itr(qv, dynamic ?
+ itr : qv->rx_itr_value,
+ false);
+ }
+
+ if (qv->num_txq || qv->num_rxq)
+ idpf_vport_intr_update_itr_ena_irq(qv);
+ }
+}
+
+/**
+ * idpf_vport_intr_deinit - Release all vector associations for the vport
+ * @vport: main vport structure
+ */
+void idpf_vport_intr_deinit(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_dis_all(vport);
+ idpf_vport_intr_napi_del_all(vport);
+ idpf_vport_intr_dis_irq_all(vport);
+ idpf_vport_intr_rel_irq(vport);
+}
+
+/**
+ * idpf_tx_dim_work - Call back from the stack
+ * @work: work queue structure
+ */
+static void idpf_tx_dim_work(struct work_struct *work)
+{
+ struct idpf_q_vector *q_vector;
+ struct idpf_vport *vport;
+ struct dim *dim;
+ u16 itr;
+
+ dim = container_of(work, struct dim, work);
+ q_vector = container_of(dim, struct idpf_q_vector, tx_dim);
+ vport = q_vector->vport;
+
+ if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile))
+ dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1;
+
+ /* look up the values in our local table */
+ itr = vport->tx_itr_profile[dim->profile_ix];
+
+ idpf_vport_intr_write_itr(q_vector, itr, true);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * idpf_rx_dim_work - Call back from the stack
+ * @work: work queue structure
+ */
+static void idpf_rx_dim_work(struct work_struct *work)
+{
+ struct idpf_q_vector *q_vector;
+ struct idpf_vport *vport;
+ struct dim *dim;
+ u16 itr;
+
+ dim = container_of(work, struct dim, work);
+ q_vector = container_of(dim, struct idpf_q_vector, rx_dim);
+ vport = q_vector->vport;
+
+ if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile))
+ dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1;
+
+ /* look up the values in our local table */
+ itr = vport->rx_itr_profile[dim->profile_ix];
+
+ idpf_vport_intr_write_itr(q_vector, itr, false);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+/**
+ * idpf_init_dim - Set up dynamic interrupt moderation
+ * @qv: q_vector structure
+ */
+static void idpf_init_dim(struct idpf_q_vector *qv)
+{
+ INIT_WORK(&qv->tx_dim.work, idpf_tx_dim_work);
+ qv->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ qv->tx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
+
+ INIT_WORK(&qv->rx_dim.work, idpf_rx_dim_work);
+ qv->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ qv->rx_dim.profile_ix = IDPF_DIM_DEFAULT_PROFILE_IX;
+}
+
+/**
+ * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
+ * @vport: main vport structure
+ */
+static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
+{
+ int q_idx;
+
+ for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx];
+
+ idpf_init_dim(q_vector);
+ napi_enable(&q_vector->napi);
+ }
+}
+
+/**
+ * idpf_tx_splitq_clean_all - Clean completion queues
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if clean is not complete else returns true
+ */
+static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
+ int budget, int *cleaned)
+{
+ u16 num_txq = q_vec->num_txq;
+ bool clean_complete = true;
+ int i, budget_per_q;
+
+ if (unlikely(!num_txq))
+ return true;
+
+ budget_per_q = DIV_ROUND_UP(budget, num_txq);
+ for (i = 0; i < num_txq; i++)
+ clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
+ budget_per_q, cleaned);
+
+ return clean_complete;
+}
+
+/**
+ * idpf_rx_splitq_clean_all - Clean all Rx queues serviced by a vector
+ * @q_vec: queue vector
+ * @budget: Used to determine if we are in netpoll
+ * @cleaned: returns number of packets cleaned
+ *
+ * Returns false if clean is not complete else returns true
+ */
+static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
+ int *cleaned)
+{
+ u16 num_rxq = q_vec->num_rxq;
+ bool clean_complete = true;
+ int pkts_cleaned = 0;
+ int i, budget_per_q;
+
+ /* We attempt to distribute budget to each Rx queue fairly, but don't
+ * allow the budget to go below 1 because that would exit polling early.
+ */
+ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
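+ /* Illustrative example with hypothetical values: a NAPI budget of 64
+ * spread across three Rx queues gives max(64 / 3, 1) == 21 descriptors
+ * of budget per queue; with 128 queues the floor of 1 still lets every
+ * queue be polled.
+ */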
+ for (i = 0; i < num_rxq; i++) {
+ struct idpf_queue *rxq = q_vec->rx[i];
+ int pkts_cleaned_per_q;
+
+ pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
+ /* if we clean as many as budgeted, we must not be done */
+ if (pkts_cleaned_per_q >= budget_per_q)
+ clean_complete = false;
+ pkts_cleaned += pkts_cleaned_per_q;
+ }
+ *cleaned = pkts_cleaned;
+
+ for (i = 0; i < q_vec->num_bufq; i++)
+ idpf_rx_clean_refillq_all(q_vec->bufq[i]);
+
+ return clean_complete;
+}
+
+/**
+ * idpf_vport_splitq_napi_poll - NAPI handler
+ * @napi: struct from which you get q_vector
+ * @budget: budget provided by stack
+ */
+static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct idpf_q_vector *q_vector =
+ container_of(napi, struct idpf_q_vector, napi);
+ bool clean_complete;
+ int work_done = 0;
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(!budget)) {
+ idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+
+ return 0;
+ }
+
+ clean_complete = idpf_rx_splitq_clean_all(q_vector, budget, &work_done);
+ clean_complete &= idpf_tx_splitq_clean_all(q_vector, budget, &work_done);
+
+ /* If work not completed, return budget and polling will return */
+ if (!clean_complete)
+ return budget;
+
+ work_done = min_t(int, work_done, budget - 1);
+
+ /* Exit the polling mode, but don't re-enable interrupts if stack might
+ * poll us due to busy-polling
+ */
+ if (likely(napi_complete_done(napi, work_done)))
+ idpf_vport_intr_update_itr_ena_irq(q_vector);
+
+ /* Switch to poll mode in the tear-down path after sending disable
+ * queues virtchnl message, as the interrupts will be disabled after
+ * that
+ */
+ if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
+ q_vector->tx[0]->flags)))
+ return budget;
+ else
+ return work_done;
+}
+
+/**
+ * idpf_vport_intr_map_vector_to_qs - Map vectors to queues
+ * @vport: virtual port
+ *
+ * Map each Rx, Tx and buffer queue to an interrupt vector in round-robin fashion.
+ */
+static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
+{
+ u16 num_txq_grp = vport->num_txq_grp;
+ int i, j, qv_idx, bufq_vidx = 0;
+ struct idpf_rxq_group *rx_qgrp;
+ struct idpf_txq_group *tx_qgrp;
+ struct idpf_queue *q, *bufq;
+ u16 q_index;
+
+ for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
+ u16 num_rxq;
+
+ rx_qgrp = &vport->rxq_grps[i];
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++) {
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q_index = q->q_vector->num_rxq;
+ q->q_vector->rx[q_index] = q;
+ q->q_vector->num_rxq++;
+ qv_idx++;
+ }
+
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ bufq->q_vector = &vport->q_vectors[bufq_vidx];
+ q_index = bufq->q_vector->num_bufq;
+ bufq->q_vector->bufq[q_index] = bufq;
+ bufq->q_vector->num_bufq++;
+ }
+ if (++bufq_vidx >= vport->num_q_vectors)
+ bufq_vidx = 0;
+ }
+ }
+
+ for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
+ u16 num_txq;
+
+ tx_qgrp = &vport->txq_grps[i];
+ num_txq = tx_qgrp->num_txq;
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
+ q = tx_qgrp->complq;
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q_index = q->q_vector->num_txq;
+ q->q_vector->tx[q_index] = q;
+ q->q_vector->num_txq++;
+ qv_idx++;
+ } else {
+ for (j = 0; j < num_txq; j++) {
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
+ q = tx_qgrp->txqs[j];
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q_index = q->q_vector->num_txq;
+ q->q_vector->tx[q_index] = q;
+ q->q_vector->num_txq++;
+
+ qv_idx++;
+ }
+ }
+ }
+}
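+
+/* Illustrative example with hypothetical values: a vport with 4 Rx queues and
+ * 2 q_vectors ends up with rxq0/rxq2 on vector 0 and rxq1/rxq3 on vector 1;
+ * Tx (completion) queues are spread round-robin in the same way.
+ */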
+
+/**
+ * idpf_vport_intr_init_vec_idx - Initialize the vector indexes
+ * @vport: virtual port
+ *
+ * Initialize vector indexes with values returned over mailbox
+ */
+static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_alloc_vectors *ac;
+ u16 *vecids, total_vecs;
+ int i;
+
+ ac = adapter->req_vec_chunks;
+ if (!ac) {
+ for (i = 0; i < vport->num_q_vectors; i++)
+ vport->q_vectors[i].v_idx = vport->q_vector_idxs[i];
+
+ return 0;
+ }
+
+ total_vecs = idpf_get_reserved_vecs(adapter);
+ vecids = kcalloc(total_vecs, sizeof(u16), GFP_KERNEL);
+ if (!vecids)
+ return -ENOMEM;
+
+ idpf_get_vec_ids(adapter, vecids, total_vecs, &ac->vchunks);
+
+ for (i = 0; i < vport->num_q_vectors; i++)
+ vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]];
+
+ kfree(vecids);
+
+ return 0;
+}
+
+/**
+ * idpf_vport_intr_napi_add_all - Register NAPI handler for all q_vectors
+ * @vport: virtual port structure
+ */
+static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
+{
+ int (*napi_poll)(struct napi_struct *napi, int budget);
+ u16 v_idx;
+
+ if (idpf_is_queue_model_split(vport->txq_model))
+ napi_poll = idpf_vport_splitq_napi_poll;
+ else
+ napi_poll = idpf_vport_singleq_napi_poll;
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+
+ netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
+
+ /* only set affinity_mask if the CPU is online */
+ if (cpu_online(v_idx))
+ cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ }
+}
+
+/**
+ * idpf_vport_intr_alloc - Allocate memory for interrupt vectors
+ * @vport: virtual port
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int idpf_vport_intr_alloc(struct idpf_vport *vport)
+{
+ u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
+ struct idpf_q_vector *q_vector;
+ int v_idx, err;
+
+ vport->q_vectors = kcalloc(vport->num_q_vectors,
+ sizeof(struct idpf_q_vector), GFP_KERNEL);
+ if (!vport->q_vectors)
+ return -ENOMEM;
+
+ txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
+ bufqs_per_vector = vport->num_bufqs_per_qgrp *
+ DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors);
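+
+ /* Illustrative example with hypothetical values: 16 Tx queues on 5
+ * q_vectors sizes each vector's tx array for DIV_ROUND_UP(16, 5) == 4
+ * queue pointers, enough for the round-robin mapping above.
+ */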
+
+ for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ q_vector = &vport->q_vectors[v_idx];
+ q_vector->vport = vport;
+
+ q_vector->tx_itr_value = IDPF_ITR_TX_DEF;
+ q_vector->tx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->tx_itr_idx = VIRTCHNL2_ITR_IDX_1;
+
+ q_vector->rx_itr_value = IDPF_ITR_RX_DEF;
+ q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
+ q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
+
+ q_vector->tx = kcalloc(txqs_per_vector,
+ sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+ if (!q_vector->tx) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ q_vector->rx = kcalloc(rxqs_per_vector,
+ sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+ if (!q_vector->rx) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ continue;
+
+ q_vector->bufq = kcalloc(bufqs_per_vector,
+ sizeof(struct idpf_queue *),
+ GFP_KERNEL);
+ if (!q_vector->bufq) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ idpf_vport_intr_rel(vport);
+
+ return err;
+}
+
+/**
+ * idpf_vport_intr_init - Setup all vectors for the given vport
+ * @vport: virtual port
+ *
+ * Returns 0 on success or negative on failure
+ */
+int idpf_vport_intr_init(struct idpf_vport *vport)
+{
+ char *int_name;
+ int err;
+
+ err = idpf_vport_intr_init_vec_idx(vport);
+ if (err)
+ return err;
+
+ idpf_vport_intr_map_vector_to_qs(vport);
+ idpf_vport_intr_napi_add_all(vport);
+ idpf_vport_intr_napi_ena_all(vport);
+
+ err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
+ if (err)
+ goto unroll_vectors_alloc;
+
+ int_name = kasprintf(GFP_KERNEL, "%s-%s",
+ dev_driver_string(&vport->adapter->pdev->dev),
+ vport->netdev->name);
+
+ err = idpf_vport_intr_req_irq(vport, int_name);
+ if (err)
+ goto unroll_vectors_alloc;
+
+ idpf_vport_intr_ena_irq_all(vport);
+
+ return 0;
+
+unroll_vectors_alloc:
+ idpf_vport_intr_napi_dis_all(vport);
+ idpf_vport_intr_napi_del_all(vport);
+
+ return err;
+}
+
+/**
+ * idpf_config_rss - Send virtchnl messages to configure RSS
+ * @vport: virtual port
+ *
+ * Return 0 on success, negative on failure
+ */
+int idpf_config_rss(struct idpf_vport *vport)
+{
+ int err;
+
+ err = idpf_send_get_set_rss_key_msg(vport, false);
+ if (err)
+ return err;
+
+ return idpf_send_get_set_rss_lut_msg(vport, false);
+}
+
+/**
+ * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
+ * @vport: virtual port structure
+ */
+static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ u16 num_active_rxq = vport->num_rxq;
+ struct idpf_rss_data *rss_data;
+ int i;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+
+ for (i = 0; i < rss_data->rss_lut_size; i++) {
+ rss_data->rss_lut[i] = i % num_active_rxq;
+ rss_data->cached_lut[i] = rss_data->rss_lut[i];
+ }
+}
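+
+/* Illustrative example with hypothetical values: with an LUT size of 8 and 4
+ * active Rx queues, the default indirection table becomes
+ * 0, 1, 2, 3, 0, 1, 2, 3, spreading flows evenly across the queues.
+ */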
+
+/**
+ * idpf_init_rss - Allocate and initialize RSS resources
+ * @vport: virtual port
+ *
+ * Return 0 on success, negative on failure
+ */
+int idpf_init_rss(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rss_data *rss_data;
+ u32 lut_size;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+
+ lut_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->rss_lut)
+ return -ENOMEM;
+
+ rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
+ if (!rss_data->cached_lut) {
+ kfree(rss_data->rss_lut);
+ rss_data->rss_lut = NULL;
+
+ return -ENOMEM;
+ }
+
+ /* Fill the default RSS lut values */
+ idpf_fill_dflt_rss_lut(vport);
+
+ return idpf_config_rss(vport);
+}
+
+/**
+ * idpf_deinit_rss - Release RSS resources
+ * @vport: virtual port
+ */
+void idpf_deinit_rss(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_rss_data *rss_data;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ kfree(rss_data->cached_lut);
+ rss_data->cached_lut = NULL;
+ kfree(rss_data->rss_lut);
+ rss_data->rss_lut = NULL;
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
new file mode 100644
index 000000000000..df76493faa75
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -0,0 +1,1023 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _IDPF_TXRX_H_
+#define _IDPF_TXRX_H_
+
+#include <net/page_pool/helpers.h>
+#include <net/tcp.h>
+#include <net/netdev_queues.h>
+
+#define IDPF_LARGE_MAX_Q 256
+#define IDPF_MAX_Q 16
+#define IDPF_MIN_Q 2
+/* Mailbox Queue */
+#define IDPF_MAX_MBXQ 1
+
+#define IDPF_MIN_TXQ_DESC 64
+#define IDPF_MIN_RXQ_DESC 64
+#define IDPF_MIN_TXQ_COMPLQ_DESC 256
+#define IDPF_MAX_QIDS 256
+
+/* Number of descriptors in a queue should be a multiple of 32. RX queue
+ * descriptors alone should be a multiple of IDPF_REQ_RXQ_DESC_MULTIPLE
+ * to achieve BufQ descriptors aligned to 32
+ */
+#define IDPF_REQ_DESC_MULTIPLE 32
+#define IDPF_REQ_RXQ_DESC_MULTIPLE (IDPF_MAX_BUFQS_PER_RXQ_GRP * 32)
+#define IDPF_MIN_TX_DESC_NEEDED (MAX_SKB_FRAGS + 6)
+#define IDPF_TX_WAKE_THRESH ((u16)IDPF_MIN_TX_DESC_NEEDED * 2)
+
+#define IDPF_MAX_DESCS 8160
+#define IDPF_MAX_TXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_DESC_MULTIPLE)
+#define IDPF_MAX_RXQ_DESC ALIGN_DOWN(IDPF_MAX_DESCS, IDPF_REQ_RXQ_DESC_MULTIPLE)
+#define MIN_SUPPORT_TXDID (\
+ VIRTCHNL2_TXDID_FLEX_FLOW_SCHED |\
+ VIRTCHNL2_TXDID_FLEX_TSO_CTX)
+
+#define IDPF_DFLT_SINGLEQ_TX_Q_GROUPS 1
+#define IDPF_DFLT_SINGLEQ_RX_Q_GROUPS 1
+#define IDPF_DFLT_SINGLEQ_TXQ_PER_GROUP 4
+#define IDPF_DFLT_SINGLEQ_RXQ_PER_GROUP 4
+
+#define IDPF_COMPLQ_PER_GROUP 1
+#define IDPF_SINGLE_BUFQ_PER_RXQ_GRP 1
+#define IDPF_MAX_BUFQS_PER_RXQ_GRP 2
+#define IDPF_BUFQ2_ENA 1
+#define IDPF_NUMQ_PER_CHUNK 1
+
+#define IDPF_DFLT_SPLITQ_TXQ_PER_GROUP 1
+#define IDPF_DFLT_SPLITQ_RXQ_PER_GROUP 1
+
+/* Default vector sharing */
+#define IDPF_MBX_Q_VEC 1
+#define IDPF_MIN_Q_VEC 1
+
+#define IDPF_DFLT_TX_Q_DESC_COUNT 512
+#define IDPF_DFLT_TX_COMPLQ_DESC_COUNT 512
+#define IDPF_DFLT_RX_Q_DESC_COUNT 512
+
+/* IMPORTANT: We absolutely _cannot_ have more buffers in the system than a
+ * given RX completion queue has descriptors. This includes _ALL_ buffer
+ * queues. E.g.: If you have two buffer queues of 512 descriptors and buffers,
+ * you have a total of 1024 buffers so your RX queue _must_ have at least that
+ * many descriptors. This macro divides a given number of RX descriptors by
+ * number of buffer queues to calculate how many descriptors each buffer queue
+ * can have without overrunning the RX queue.
+ *
+ * If you give hardware more buffers than completion descriptors what will
+ * happen is that if hardware gets a chance to post more than ring wrap of
+ * descriptors before SW gets an interrupt and overwrites SW head, the gen bit
+ * in the descriptor will be wrong. Any overwritten descriptors' buffers will
+ * be gone forever and SW has no reasonable way to tell that this has happened.
+ * From SW perspective, when we finally get an interrupt, it looks like we're
+ * still waiting for descriptor to be done, stalling forever.
+ */
+#define IDPF_RX_BUFQ_DESC_COUNT(RXD, NUM_BUFQ) ((RXD) / (NUM_BUFQ))
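+
+/* Illustrative example: IDPF_RX_BUFQ_DESC_COUNT(1024, 2) == 512, i.e. the two
+ * buffer queues from the example above may each hold at most 512 buffers so
+ * the 1024-descriptor RX queue can never be overrun.
+ */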
+
+#define IDPF_RX_BUFQ_WORKING_SET(rxq) ((rxq)->desc_count - 1)
+
+#define IDPF_RX_BUMP_NTC(rxq, ntc) \
+do { \
+ if (unlikely(++(ntc) == (rxq)->desc_count)) { \
+ ntc = 0; \
+ change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
+ } \
+} while (0)
+
+#define IDPF_SINGLEQ_BUMP_RING_IDX(q, idx) \
+do { \
+ if (unlikely(++(idx) == (q)->desc_count)) \
+ idx = 0; \
+} while (0)
+
+#define IDPF_RX_HDR_SIZE 256
+#define IDPF_RX_BUF_2048 2048
+#define IDPF_RX_BUF_4096 4096
+#define IDPF_RX_BUF_STRIDE 32
+#define IDPF_RX_BUF_POST_STRIDE 16
+#define IDPF_LOW_WATERMARK 64
+/* Size of header buffer specifically for header split */
+#define IDPF_HDR_BUF_SIZE 256
+#define IDPF_PACKET_HDR_PAD \
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+#define IDPF_TX_TSO_MIN_MSS 88
+
+/* Minimum number of descriptors between 2 descriptors with the RE bit set;
+ * only relevant in flow scheduling mode
+ */
+#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
+
+#define IDPF_RX_BI_BUFID_S 0
+#define IDPF_RX_BI_BUFID_M GENMASK(14, 0)
+#define IDPF_RX_BI_GEN_S 15
+#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
+#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
+#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
+
+#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
+ (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
+#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
+ (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
+#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
+
+#define IDPF_BASE_TX_DESC(txq, i) \
+ (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
+#define IDPF_BASE_TX_CTX_DESC(txq, i) \
+ (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
+#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
+ (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
+
+#define IDPF_FLEX_TX_DESC(txq, i) \
+ (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
+#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
+ (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
+
+#define IDPF_DESC_UNUSED(txq) \
+ ((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
+ (txq)->next_to_clean - (txq)->next_to_use - 1)
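+
+/* Illustrative example with hypothetical values: on a 512-descriptor queue
+ * with next_to_clean == 10 and next_to_use == 200, IDPF_DESC_UNUSED()
+ * evaluates to 512 + 10 - 200 - 1 == 321 free descriptors; once the ring
+ * wraps so that next_to_clean (200) leads next_to_use (10), it evaluates to
+ * 200 - 10 - 1 == 189.
+ */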
+
+#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
+#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
+ (txq)->desc_count >> 2)
+
+#define IDPF_TX_COMPLQ_OVERFLOW_THRESH(txcq) ((txcq)->desc_count >> 1)
+/* Determine the absolute number of completions pending, i.e. the number of
+ * completions that are expected to arrive on the TX completion queue.
+ */
+#define IDPF_TX_COMPLQ_PENDING(txq) \
+ (((txq)->num_completions_pending >= (txq)->complq->num_completions ? \
+ 0 : U64_MAX) + \
+ (txq)->num_completions_pending - (txq)->complq->num_completions)
+
+#define IDPF_TX_SPLITQ_COMPL_TAG_WIDTH 16
+#define IDPF_SPLITQ_TX_INVAL_COMPL_TAG -1
+/* Adjust the generation for the completion tag and wrap if necessary */
+#define IDPF_TX_ADJ_COMPL_TAG_GEN(txq) \
+ ((++(txq)->compl_tag_cur_gen) >= (txq)->compl_tag_gen_max ? \
+ 0 : (txq)->compl_tag_cur_gen)
+
+#define IDPF_TXD_LAST_DESC_CMD (IDPF_TX_DESC_CMD_EOP | IDPF_TX_DESC_CMD_RS)
+
+#define IDPF_TX_FLAGS_TSO BIT(0)
+#define IDPF_TX_FLAGS_IPV4 BIT(1)
+#define IDPF_TX_FLAGS_IPV6 BIT(2)
+#define IDPF_TX_FLAGS_TUNNEL BIT(3)
+
+union idpf_tx_flex_desc {
+ struct idpf_flex_tx_desc q; /* queue based scheduling */
+ struct idpf_flex_tx_sched_desc flow; /* flow based scheduling */
+};
+
+/**
+ * struct idpf_tx_buf
+ * @next_to_watch: Next descriptor to clean
+ * @skb: Pointer to the skb
+ * @dma: DMA address
+ * @len: DMA length
+ * @bytecount: Number of bytes
+ * @gso_segs: Number of GSO segments
+ * @compl_tag: Splitq only, unique identifier for a buffer. Used to compare
+ * with completion tag returned in buffer completion event.
+ * Because the completion tag is expected to be the same in all
+ * data descriptors for a given packet, and a single packet can
+ * span multiple buffers, we need this field to track all
+ * buffers associated with this completion tag independently of
+ * the buf_id. The tag consists of an N bit buf_id and M upper
+ * order "generation bits". See compl_tag_bufid_m and
+ * compl_tag_gen_s in struct idpf_queue. We'll use a value of -1
+ * to indicate the tag is not valid.
+ * @ctx_entry: Singleq only. Used to indicate the corresponding entry
+ * in the descriptor ring was used for a context descriptor and
+ * this buffer entry should be skipped.
+ */
+struct idpf_tx_buf {
+ void *next_to_watch;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ unsigned int bytecount;
+ unsigned short gso_segs;
+
+ union {
+ int compl_tag;
+
+ bool ctx_entry;
+ };
+};
+
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct idpf_tx_buf buf;
+};
+
+/**
+ * struct idpf_buf_lifo - LIFO for managing OOO completions
+ * @top: Used to know how many buffers are left
+ * @size: Total size of LIFO
+ * @bufs: Backing array
+ */
+struct idpf_buf_lifo {
+ u16 top;
+ u16 size;
+ struct idpf_tx_stash **bufs;
+};
+
+/**
+ * struct idpf_tx_offload_params - Offload parameters for a given packet
+ * @tx_flags: Feature flags enabled for this packet
+ * @hdr_offsets: Offset parameter for single queue model
+ * @cd_tunneling: Type of tunneling enabled for single queue model
+ * @tso_len: Total length of payload to segment
+ * @mss: Segment size
+ * @tso_segs: Number of segments to be sent
+ * @tso_hdr_len: Length of headers to be duplicated
+ * @td_cmd: Command field to be inserted into descriptor
+ */
+struct idpf_tx_offload_params {
+ u32 tx_flags;
+
+ u32 hdr_offsets;
+ u32 cd_tunneling;
+
+ u32 tso_len;
+ u16 mss;
+ u16 tso_segs;
+ u16 tso_hdr_len;
+
+ u16 td_cmd;
+};
+
+/**
+ * struct idpf_tx_splitq_params
+ * @dtype: General descriptor info
+ * @eop_cmd: Type of EOP
+ * @compl_tag: Associated tag for completion
+ * @td_tag: Descriptor tunneling tag
+ * @offload: Offload parameters
+ */
+struct idpf_tx_splitq_params {
+ enum idpf_tx_desc_dtype_value dtype;
+ u16 eop_cmd;
+ union {
+ u16 compl_tag;
+ u16 td_tag;
+ };
+
+ struct idpf_tx_offload_params offload;
+};
+
+enum idpf_tx_ctx_desc_eipt_offload {
+ IDPF_TX_CTX_EXT_IP_NONE = 0x0,
+ IDPF_TX_CTX_EXT_IP_IPV6 = 0x1,
+ IDPF_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2,
+ IDPF_TX_CTX_EXT_IP_IPV4 = 0x3
+};
+
+/* Checksum offload bits decoded from the receive descriptor. */
+struct idpf_rx_csum_decoded {
+ u32 l3l4p : 1;
+ u32 ipe : 1;
+ u32 eipe : 1;
+ u32 eudpe : 1;
+ u32 ipv6exadd : 1;
+ u32 l4e : 1;
+ u32 pprs : 1;
+ u32 nat : 1;
+ u32 raw_csum_inv : 1;
+ u32 raw_csum : 16;
+};
+
+struct idpf_rx_extracted {
+ unsigned int size;
+ u16 rx_ptype;
+};
+
+#define IDPF_TX_COMPLQ_CLEAN_BUDGET 256
+#define IDPF_TX_MIN_PKT_LEN 17
+#define IDPF_TX_DESCS_FOR_SKB_DATA_PTR 1
+#define IDPF_TX_DESCS_PER_CACHE_LINE (L1_CACHE_BYTES / \
+ sizeof(struct idpf_flex_tx_desc))
+#define IDPF_TX_DESCS_FOR_CTX 1
+/* TX descriptors needed, worst case */
+#define IDPF_TX_DESC_NEEDED (MAX_SKB_FRAGS + IDPF_TX_DESCS_FOR_CTX + \
+ IDPF_TX_DESCS_PER_CACHE_LINE + \
+ IDPF_TX_DESCS_FOR_SKB_DATA_PTR)
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define IDPF_TX_MAX_READ_REQ_SIZE SZ_4K
+#define IDPF_TX_MAX_DESC_DATA (SZ_16K - 1)
+#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
+ ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
+
+#define IDPF_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+#define IDPF_RX_DESC(rxq, i) \
+ (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
+
+struct idpf_rx_buf {
+ struct page *page;
+ unsigned int page_offset;
+ u16 truesize;
+};
+
+#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
+#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
+ (sizeof(u16) * IDPF_RX_MAX_PTYPE_PROTO_IDS))
+#define IDPF_RX_PTYPE_HDR_SZ sizeof(struct virtchnl2_get_ptype_info)
+#define IDPF_RX_MAX_PTYPES_PER_BUF \
+ DIV_ROUND_DOWN_ULL((IDPF_CTLQ_MAX_BUF_LEN - IDPF_RX_PTYPE_HDR_SZ), \
+ IDPF_RX_MAX_PTYPE_SZ)
+
+#define IDPF_GET_PTYPE_SIZE(p) struct_size((p), proto_id, (p)->proto_id_count)
+
+#define IDPF_TUN_IP_GRE (\
+ IDPF_PTYPE_TUNNEL_IP |\
+ IDPF_PTYPE_TUNNEL_IP_GRENAT)
+
+#define IDPF_TUN_IP_GRE_MAC (\
+ IDPF_TUN_IP_GRE |\
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC)
+
+#define IDPF_RX_MAX_PTYPE 1024
+#define IDPF_RX_MAX_BASE_PTYPE 256
+#define IDPF_INVALID_PTYPE_ID 0xFFFF
+
+/* Packet type non-ip values */
+enum idpf_rx_ptype_l2 {
+ IDPF_RX_PTYPE_L2_RESERVED = 0,
+ IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
+ IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
+ IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
+ IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
+ IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
+ IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
+ IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
+ IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
+ IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
+ IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
+ IDPF_RX_PTYPE_L2_ARP = 11,
+};
+
+enum idpf_rx_ptype_outer_ip {
+ IDPF_RX_PTYPE_OUTER_L2 = 0,
+ IDPF_RX_PTYPE_OUTER_IP = 1,
+};
+
+#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
+ (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
+ ((ptype)->outer_ip_ver == (ipv)))
+
+enum idpf_rx_ptype_outer_ip_ver {
+ IDPF_RX_PTYPE_OUTER_NONE = 0,
+ IDPF_RX_PTYPE_OUTER_IPV4 = 1,
+ IDPF_RX_PTYPE_OUTER_IPV6 = 2,
+};
+
+enum idpf_rx_ptype_outer_fragmented {
+ IDPF_RX_PTYPE_NOT_FRAG = 0,
+ IDPF_RX_PTYPE_FRAG = 1,
+};
+
+enum idpf_rx_ptype_tunnel_type {
+ IDPF_RX_PTYPE_TUNNEL_NONE = 0,
+ IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
+};
+
+enum idpf_rx_ptype_tunnel_end_prot {
+ IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
+ IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
+ IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
+};
+
+enum idpf_rx_ptype_inner_prot {
+ IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
+ IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
+ IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
+ IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
+ IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
+ IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
+};
+
+enum idpf_rx_ptype_payload_layer {
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
+};
+
+enum idpf_tunnel_state {
+ IDPF_PTYPE_TUNNEL_IP = BIT(0),
+ IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC = BIT(2),
+};
+
+struct idpf_ptype_state {
+ bool outer_ip;
+ bool outer_frag;
+ u8 tunnel_state;
+};
+
+struct idpf_rx_ptype_decoded {
+ u32 ptype:10;
+ u32 known:1;
+ u32 outer_ip:1;
+ u32 outer_ip_ver:2;
+ u32 outer_frag:1;
+ u32 tunnel_type:3;
+ u32 tunnel_end_prot:2;
+ u32 tunnel_end_frag:1;
+ u32 inner_prot:4;
+ u32 payload_layer:3;
+};
+
+/**
+ * enum idpf_queue_flags_t
+ * @__IDPF_Q_GEN_CHK: Queues operating in splitq mode use a generation bit to
+ * identify new descriptor writebacks on the ring. HW sets
+ * the gen bit to 1 on the first writeback of any given
+ * descriptor. After the ring wraps, HW sets the gen bit of
+ * those descriptors to 0, and continues flipping
+ * 0->1 or 1->0 on each ring wrap. SW maintains its own
+ * gen bit to know what value will indicate writebacks on
+ * the next pass around the ring. E.g. it is initialized
+ * to 1 and knows that reading a gen bit of 1 in any
+ * descriptor on the initial pass of the ring indicates a
+ * writeback. It also flips on every ring wrap.
+ * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
+ * and RFLGQ_GEN is the SW bit.
+ * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
+ * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
+ * @__IDPF_Q_POLL_MODE: Enable poll mode
+ * @__IDPF_Q_FLAGS_NBITS: Must be last
+ */
+enum idpf_queue_flags_t {
+ __IDPF_Q_GEN_CHK,
+ __IDPF_RFLQ_GEN_CHK,
+ __IDPF_Q_FLOW_SCH_EN,
+ __IDPF_Q_SW_MARKER,
+ __IDPF_Q_POLL_MODE,
+
+ __IDPF_Q_FLAGS_NBITS,
+};
+
+/**
+ * struct idpf_vec_regs
+ * @dyn_ctl_reg: Dynamic control interrupt register offset
+ * @itrn_reg: Interrupt Throttling Rate register offset
+ * @itrn_index_spacing: Register spacing between ITR registers of the same
+ * vector
+ */
+struct idpf_vec_regs {
+ u32 dyn_ctl_reg;
+ u32 itrn_reg;
+ u32 itrn_index_spacing;
+};
+
+/**
+ * struct idpf_intr_reg
+ * @dyn_ctl: Dynamic control interrupt register
+ * @dyn_ctl_intena_m: Mask for dyn_ctl interrupt enable
+ * @dyn_ctl_itridx_s: Register bit offset for ITR index
+ * @dyn_ctl_itridx_m: Mask for ITR index
+ * @dyn_ctl_intrvl_s: Register bit offset for ITR interval
+ * @rx_itr: RX ITR register
+ * @tx_itr: TX ITR register
+ * @icr_ena: Interrupt cause register offset
+ * @icr_ena_ctlq_m: Mask for ICR
+ */
+struct idpf_intr_reg {
+ void __iomem *dyn_ctl;
+ u32 dyn_ctl_intena_m;
+ u32 dyn_ctl_itridx_s;
+ u32 dyn_ctl_itridx_m;
+ u32 dyn_ctl_intrvl_s;
+ void __iomem *rx_itr;
+ void __iomem *tx_itr;
+ void __iomem *icr_ena;
+ u32 icr_ena_ctlq_m;
+};
+
+/**
+ * struct idpf_q_vector
+ * @vport: Vport back pointer
+ * @affinity_mask: CPU affinity mask
+ * @napi: napi handler
+ * @v_idx: Vector index
+ * @intr_reg: See struct idpf_intr_reg
+ * @num_txq: Number of TX queues
+ * @tx: Array of TX queues to service
+ * @tx_dim: Data for TX net_dim algorithm
+ * @tx_itr_value: TX interrupt throttling rate
+ * @tx_intr_mode: Dynamic ITR or not
+ * @tx_itr_idx: TX ITR index
+ * @num_rxq: Number of RX queues
+ * @rx: Array of RX queues to service
+ * @rx_dim: Data for RX net_dim algorithm
+ * @rx_itr_value: RX interrupt throttling rate
+ * @rx_intr_mode: Dynamic ITR or not
+ * @rx_itr_idx: RX ITR index
+ * @num_bufq: Number of buffer queues
+ * @bufq: Array of buffer queues to service
+ * @total_events: Number of interrupts processed
+ * @name: Queue vector name
+ */
+struct idpf_q_vector {
+ struct idpf_vport *vport;
+ cpumask_t affinity_mask;
+ struct napi_struct napi;
+ u16 v_idx;
+ struct idpf_intr_reg intr_reg;
+
+ u16 num_txq;
+ struct idpf_queue **tx;
+ struct dim tx_dim;
+ u16 tx_itr_value;
+ bool tx_intr_mode;
+ u32 tx_itr_idx;
+
+ u16 num_rxq;
+ struct idpf_queue **rx;
+ struct dim rx_dim;
+ u16 rx_itr_value;
+ bool rx_intr_mode;
+ u32 rx_itr_idx;
+
+ u16 num_bufq;
+ struct idpf_queue **bufq;
+
+ u16 total_events;
+ char *name;
+};
+
+struct idpf_rx_queue_stats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t rsc_pkts;
+ u64_stats_t hw_csum_err;
+ u64_stats_t hsplit_pkts;
+ u64_stats_t hsplit_buf_ovf;
+ u64_stats_t bad_descs;
+};
+
+struct idpf_tx_queue_stats {
+ u64_stats_t packets;
+ u64_stats_t bytes;
+ u64_stats_t lso_pkts;
+ u64_stats_t linearize;
+ u64_stats_t q_busy;
+ u64_stats_t skb_drops;
+ u64_stats_t dma_map_errs;
+};
+
+struct idpf_cleaned_stats {
+ u32 packets;
+ u32 bytes;
+};
+
+union idpf_queue_stats {
+ struct idpf_rx_queue_stats rx;
+ struct idpf_tx_queue_stats tx;
+};
+
+#define IDPF_ITR_DYNAMIC 1
+#define IDPF_ITR_MAX 0x1FE0
+#define IDPF_ITR_20K 0x0032
+#define IDPF_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define IDPF_ITR_MASK 0x1FFE /* ITR register value alignment mask */
+#define ITR_REG_ALIGN(setting) ((setting) & IDPF_ITR_MASK)
+#define IDPF_ITR_IS_DYNAMIC(itr_mode) (itr_mode)
+#define IDPF_ITR_TX_DEF IDPF_ITR_20K
+#define IDPF_ITR_RX_DEF IDPF_ITR_20K
+/* Index used for 'No ITR' update in DYN_CTL register */
+#define IDPF_NO_ITR_UPDATE_IDX 3
+#define IDPF_ITR_IDX_SPACING(spacing, dflt) (spacing ? spacing : dflt)
+#define IDPF_DIM_DEFAULT_PROFILE_IX 1
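+
+/* Illustrative example: the default ITR of IDPF_ITR_20K (0x32 == 50 us) is
+ * written to hardware as ITR_REG_ALIGN(50) >> IDPF_ITR_GRAN_S == 25, i.e. 25
+ * units of the assumed 2 us granularity, roughly 20K interrupts per second.
+ */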
+
+/**
+ * struct idpf_queue
+ * @dev: Device back pointer for DMA mapping
+ * @vport: Back pointer to associated vport
+ * @txq_grp: See struct idpf_txq_group
+ * @rxq_grp: See struct idpf_rxq_group
+ * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
+ * buffer queue uses this index to determine which group of refill queues
+ * to clean.
+ * For TX queue, it is used as index to map between TX queue group and
+ * hot path TX pointers stored in vport. Used in both singleq/splitq.
+ * For RX queue, it is used to index to total RX queue across groups and
+ * used for skb reporting.
+ * @tail: Tail offset. Used for both queue models single and split. In splitq
+ * model relevant only for TX queue and RX queue.
+ * @tx_buf: See struct idpf_tx_buf
+ * @rx_buf: Struct with RX buffer related members
+ * @rx_buf.buf: See struct idpf_rx_buf
+ * @rx_buf.hdr_buf_pa: DMA handle
+ * @rx_buf.hdr_buf_va: Virtual address
+ * @pp: Page pool pointer
+ * @skb: Pointer to the skb
+ * @q_type: Queue type (TX, RX, TX completion, RX buffer)
+ * @q_id: Queue id
+ * @desc_count: Number of descriptors
+ * @next_to_use: Next descriptor to use. Relevant in both split & single txq
+ * and bufq.
+ * @next_to_clean: Next descriptor to clean. In split queue model, only
+ * relevant to TX completion queue and RX queue.
+ * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
+ * only relevant to RX queue.
+ * @flags: See enum idpf_queue_flags_t
+ * @q_stats: See union idpf_queue_stats
+ * @stats_sync: See struct u64_stats_sync
+ * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
+ * the TX completion queue, it can be for any TXQ associated
+ * with that completion queue. This means we can clean up to
+ * N TXQs during a single call to clean the completion queue.
+ * cleaned_bytes|pkts tracks the clean stats per TXQ during
+ * that single call to clean the completion queue. By doing so,
+ * we can update BQL with aggregate cleaned stats for each TXQ
+ * only once at the end of the cleaning routine.
+ * @cleaned_pkts: Number of packets cleaned for the above said case
+ * @rx_hsplit_en: RX headsplit enable
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ * @rx_max_pkt_size: RX max packet size
+ * @rx_buf_stride: RX buffer stride
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rxdids: Supported RX descriptor ids
+ * @q_vector: Backreference to associated vector
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @desc_ring: Descriptor ring memory
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ * @tx_min_pkt_len: Min supported packet length
+ * @num_completions: Only relevant for TX completion queue. It tracks the
+ * number of completions received to compare against the
+ * number of completions pending, as accumulated by the
+ * TX queues.
+ * @buf_stack: Stack of empty buffers to store buffer info for out of order
+ * buffer completions. See struct idpf_buf_lifo.
+ * @compl_tag_bufid_m: Completion tag buffer id mask
+ * @compl_tag_gen_s: Completion tag generation bit
+ * The format of the completion tag will change based on the TXQ
+ * descriptor ring size so that we can maintain roughly the same level
+ * of "uniqueness" across all descriptor sizes. For example, if the
+ * TXQ descriptor ring size is 64 (the minimum size supported), the
+ * completion tag will be formatted as below:
+ * 15 6 5 0
+ * --------------------------------
+ * | GEN=0-1023 |IDX = 0-63|
+ * --------------------------------
+ *
+ * This gives us 64*1024 = 65536 possible unique values. Similarly, if
+ * the TXQ descriptor ring size is 8160 (the maximum size supported),
+ * the completion tag will be formatted as below:
+ * 15 13 12 0
+ * --------------------------------
+ * |GEN | IDX = 0-8159 |
+ * --------------------------------
+ *
+ * This gives us 8*8160 = 65280 possible unique values.
+ * @compl_tag_cur_gen: Used to keep track of current completion tag generation
+ * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
+ * @sched_buf_hash: Hash table to store buffers
+ */
+struct idpf_queue {
+ struct device *dev;
+ struct idpf_vport *vport;
+ union {
+ struct idpf_txq_group *txq_grp;
+ struct idpf_rxq_group *rxq_grp;
+ };
+ u16 idx;
+ void __iomem *tail;
+ union {
+ struct idpf_tx_buf *tx_buf;
+ struct {
+ struct idpf_rx_buf *buf;
+ dma_addr_t hdr_buf_pa;
+ void *hdr_buf_va;
+ } rx_buf;
+ };
+ struct page_pool *pp;
+ struct sk_buff *skb;
+ u16 q_type;
+ u32 q_id;
+ u16 desc_count;
+
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+
+ union idpf_queue_stats q_stats;
+ struct u64_stats_sync stats_sync;
+
+ u32 cleaned_bytes;
+ u16 cleaned_pkts;
+
+ bool rx_hsplit_en;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ u16 rx_max_pkt_size;
+ u16 rx_buf_stride;
+ u8 rx_buffer_low_watermark;
+ u64 rxdids;
+ struct idpf_q_vector *q_vector;
+ unsigned int size;
+ dma_addr_t dma;
+ void *desc_ring;
+
+ u16 tx_max_bufs;
+ u8 tx_min_pkt_len;
+
+ u32 num_completions;
+
+ struct idpf_buf_lifo buf_stack;
+
+ u16 compl_tag_bufid_m;
+ u16 compl_tag_gen_s;
+
+ u16 compl_tag_cur_gen;
+ u16 compl_tag_gen_max;
+
+ DECLARE_HASHTABLE(sched_buf_hash, 12);
+} ____cacheline_internodealigned_in_smp;
+
+/**
+ * struct idpf_sw_queue
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: Buffer to allocate at
+ * @flags: See enum idpf_queue_flags_t
+ * @ring: Pointer to the ring
+ * @desc_count: Descriptor count
+ * @dev: Device back pointer for DMA mapping
+ *
+ * Software queues are used in splitq mode to manage buffers between rxq
+ * producer and the bufq consumer. These are required in order to maintain a
+ * lockless buffer management system and are strictly software only constructs.
+ */
+struct idpf_sw_queue {
+ u16 next_to_clean;
+ u16 next_to_alloc;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 *ring;
+ u16 desc_count;
+ struct device *dev;
+} ____cacheline_internodealigned_in_smp;
+
+/**
+ * struct idpf_rxq_set
+ * @rxq: RX queue
+ * @refillq0: Pointer to refill queue 0
+ * @refillq1: Pointer to refill queue 1
+ *
+ * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
+ * Each rxq needs a refillq to return used buffers back to the respective bufq.
+ * Bufqs then clean these refillqs for buffers to give to hardware.
+ */
+struct idpf_rxq_set {
+ struct idpf_queue rxq;
+ struct idpf_sw_queue *refillq0;
+ struct idpf_sw_queue *refillq1;
+};
+
+/**
+ * struct idpf_bufq_set
+ * @bufq: Buffer queue
+ * @num_refillqs: Number of refill queues. This is always equal to num_rxq_sets
+ * in idpf_rxq_group.
+ * @refillqs: Pointer to refill queues array.
+ *
+ * Splitq only. idpf_bufq_set associates a bufq to an array of refillqs.
+ * In this bufq_set, there will be one refillq for each rxq in this rxq_group.
+ * Used buffers received by rxqs will be put on refillqs which bufqs will
+ * clean to return new buffers back to hardware.
+ *
+ * Buffers needed by some number of rxqs associated in this rxq_group are
+ * managed by at most two bufqs (depending on performance configuration).
+ */
+struct idpf_bufq_set {
+ struct idpf_queue bufq;
+ int num_refillqs;
+ struct idpf_sw_queue *refillqs;
+};
+
+/**
+ * struct idpf_rxq_group
+ * @vport: Vport back pointer
+ * @singleq: Struct with single queue related members
+ * @singleq.num_rxq: Number of RX queues associated
+ * @singleq.rxqs: Array of RX queue pointers
+ * @splitq: Struct with split queue related members
+ * @splitq.num_rxq_sets: Number of RX queue sets
+ * @splitq.rxq_sets: Array of RX queue sets
+ * @splitq.bufq_sets: Buffer queue set pointer
+ *
+ * In singleq mode, an rxq_group is simply an array of rxqs. In splitq, a
+ * rxq_group contains all the rxqs, bufqs and refillqs needed to
+ * manage buffers in splitq mode.
+ */
+struct idpf_rxq_group {
+ struct idpf_vport *vport;
+
+ union {
+ struct {
+ u16 num_rxq;
+ struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
+ } singleq;
+ struct {
+ u16 num_rxq_sets;
+ struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
+ struct idpf_bufq_set *bufq_sets;
+ } splitq;
+ };
+};
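A minimal sketch of how the splitq wiring described above fits together, assuming a group with two buffer queue sets (names reuse the structures above; illustrative only, not driver code): rxq set i takes one refill queue from each bufq_set, so used buffers flow back to the bufq that originally supplied them.

    struct idpf_rxq_set *rxq_set = rx_qgrp->splitq.rxq_sets[i];

    rxq_set->refillq0 = &rx_qgrp->splitq.bufq_sets[0].refillqs[i];
    rxq_set->refillq1 = &rx_qgrp->splitq.bufq_sets[1].refillqs[i];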
+
+/**
+ * struct idpf_txq_group
+ * @vport: Vport back pointer
+ * @num_txq: Number of TX queues associated
+ * @txqs: Array of TX queue pointers
+ * @complq: Associated completion queue pointer, split queue only
+ * @num_completions_pending: Total number of completions pending for the
+ * completion queue, accumulated for all TX queues
+ * associated with that completion queue.
+ *
+ * Between singleq and splitq, a txq_group is largely the same except for the
+ * complq. In splitq a single complq is responsible for handling completions
+ * for some number of txqs associated in this txq_group.
+ */
+struct idpf_txq_group {
+ struct idpf_vport *vport;
+
+ u16 num_txq;
+ struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
+
+ struct idpf_queue *complq;
+
+ u32 num_completions_pending;
+};
+
+/**
+ * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
+ * @size: transmit request size in bytes
+ *
+ * In the case where a large frag (>= 16K) needs to be split across multiple
+ * descriptors, we need to assume that we can have no more than 12K of data
+ * per descriptor due to hardware alignment restrictions (4K alignment).
+ */
+static inline u32 idpf_size_to_txd_count(unsigned int size)
+{
+ return DIV_ROUND_UP(size, IDPF_TX_MAX_DESC_DATA_ALIGNED);
+}
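For instance, under the 12K-per-descriptor assumption above, a 64KB fragment needs DIV_ROUND_UP(65536, 12288) = 6 descriptors, and a 16KB fragment needs 2.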
+
+/**
+ * idpf_tx_singleq_build_ctob - populate command tag offset and size
+ * @td_cmd: Command to be filled in desc
+ * @td_offset: Offset to be filled in desc
+ * @size: Size of the buffer
+ * @td_tag: td tag to be filled
+ *
+ * Returns the 64 bit value populated with the input parameters
+ */
+static inline __le64 idpf_tx_singleq_build_ctob(u64 td_cmd, u64 td_offset,
+ unsigned int size, u64 td_tag)
+{
+ return cpu_to_le64(IDPF_TX_DESC_DTYPE_DATA |
+ (td_cmd << IDPF_TXD_QW1_CMD_S) |
+ (td_offset << IDPF_TXD_QW1_OFFSET_S) |
+ ((u64)size << IDPF_TXD_QW1_TX_BUF_SZ_S) |
+ (td_tag << IDPF_TXD_QW1_L2TAG1_S));
+}
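A minimal usage sketch, assuming the singleq data descriptor's second quadword is named qw1 (that field name is an assumption here): encoding a 1500-byte buffer with no command bits, offsets, or L2 tag would look like:

    /* Sketch: only DTYPE and the buffer size end up set in qword 1 */
    tx_desc->qw1 = idpf_tx_singleq_build_ctob(0, 0, 1500, 0);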
+
+void idpf_tx_splitq_build_ctb(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size);
+void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size);
+/**
+ * idpf_tx_splitq_build_desc - determine which type of data descriptor to build
+ * @desc: descriptor to populate
+ * @params: pointer to tx params struct
+ * @td_cmd: command to be filled in desc
+ * @size: size of buffer
+ */
+static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
+ struct idpf_tx_splitq_params *params,
+ u16 td_cmd, u16 size)
+{
+ if (params->dtype == IDPF_TX_DESC_DTYPE_FLEX_L2TAG1_L2TAG2)
+ idpf_tx_splitq_build_ctb(desc, params, td_cmd, size);
+ else
+ idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
+}
+
+/**
+ * idpf_alloc_page - Allocate a new RX buffer from the page pool
+ * @pool: page_pool to allocate from
+ * @buf: metadata struct to populate with page info
+ * @buf_size: 2K or 4K
+ *
+ * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
+ */
+static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
+ struct idpf_rx_buf *buf,
+ unsigned int buf_size)
+{
+ if (buf_size == IDPF_RX_BUF_2048)
+ buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
+ buf_size);
+ else
+ buf->page = page_pool_dev_alloc_pages(pool);
+
+ if (!buf->page)
+ return DMA_MAPPING_ERROR;
+
+ buf->truesize = buf_size;
+
+ return page_pool_get_dma_addr(buf->page) + buf->page_offset +
+ pool->p.offset;
+}
+
+/**
+ * idpf_rx_put_page - Return RX buffer page to pool
+ * @rx_buf: RX buffer metadata struct
+ */
+static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
+{
+ page_pool_put_page(rx_buf->page->pp, rx_buf->page,
+ rx_buf->truesize, true);
+ rx_buf->page = NULL;
+}
+
+/**
+ * idpf_rx_sync_for_cpu - Synchronize DMA buffer
+ * @rx_buf: RX buffer metadata struct
+ * @len: frame length from descriptor
+ */
+static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
+{
+ struct page *page = rx_buf->page;
+ struct page_pool *pp = page->pp;
+
+ dma_sync_single_range_for_cpu(pp->p.dev,
+ page_pool_get_dma_addr(page),
+ rx_buf->page_offset + pp->p.offset, len,
+ page_pool_get_dma_dir(pp));
+}
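Taken together, a hedged sketch of the buffer lifecycle these helpers imply, assuming rxq->pp has been initialized and len comes from the RX descriptor:

    /* Sketch only: allocate, let HW fill the buffer, sync it for the CPU,
     * then return the page to the pool once the data has been consumed.
     */
    dma_addr_t addr = idpf_alloc_page(rxq->pp, buf, IDPF_RX_BUF_2048);

    if (addr == DMA_MAPPING_ERROR)
            return false;
    /* ... post addr to HW; later a descriptor reports len ... */
    idpf_rx_sync_for_cpu(buf, len);
    /* ... copy or attach the data to an skb ... */
    idpf_rx_put_page(buf);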
+
+int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
+void idpf_vport_init_num_qs(struct idpf_vport *vport,
+ struct virtchnl2_create_vport *vport_msg);
+void idpf_vport_calc_num_q_desc(struct idpf_vport *vport);
+int idpf_vport_calc_total_qs(struct idpf_adapter *adapter, u16 vport_index,
+ struct virtchnl2_create_vport *vport_msg,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_calc_num_q_groups(struct idpf_vport *vport);
+int idpf_vport_queues_alloc(struct idpf_vport *vport);
+void idpf_vport_queues_rel(struct idpf_vport *vport);
+void idpf_vport_intr_rel(struct idpf_vport *vport);
+int idpf_vport_intr_alloc(struct idpf_vport *vport);
+void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
+void idpf_vport_intr_deinit(struct idpf_vport *vport);
+int idpf_vport_intr_init(struct idpf_vport *vport);
+enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
+int idpf_config_rss(struct idpf_vport *vport);
+int idpf_init_rss(struct idpf_vport *vport);
+void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_rx_bufs_init_all(struct idpf_vport *vport);
+void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size);
+struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
+ struct idpf_rx_buf *rx_buf,
+ unsigned int size);
+bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
+void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
+void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+ bool xmit_more);
+unsigned int idpf_size_to_txd_count(unsigned int size);
+netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
+void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+ struct idpf_tx_buf *first, u16 ring_idx);
+unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+ struct sk_buff *skb);
+bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count);
+int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
+void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
+ struct net_device *netdev);
+netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
+ struct net_device *netdev);
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
+ u16 cleaned_count);
+int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
+
+#endif /* !_IDPF_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
new file mode 100644
index 000000000000..8ade4e3a9fe1
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+#include "idpf_lan_vf_regs.h"
+
+#define IDPF_VF_ITR_IDX_SPACING 0x40
+
+/**
+ * idpf_vf_ctlq_reg_init - initialize default mailbox registers
+ * @cq: pointer to the array of create control queues
+ */
+static void idpf_vf_ctlq_reg_init(struct idpf_ctlq_create_info *cq)
+{
+ int i;
+
+ for (i = 0; i < IDPF_NUM_DFLT_MBX_Q; i++) {
+ struct idpf_ctlq_create_info *ccq = cq + i;
+
+ switch (ccq->type) {
+ case IDPF_CTLQ_TYPE_MAILBOX_TX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = VF_ATQH;
+ ccq->reg.tail = VF_ATQT;
+ ccq->reg.len = VF_ATQLEN;
+ ccq->reg.bah = VF_ATQBAH;
+ ccq->reg.bal = VF_ATQBAL;
+ ccq->reg.len_mask = VF_ATQLEN_ATQLEN_M;
+ ccq->reg.len_ena_mask = VF_ATQLEN_ATQENABLE_M;
+ ccq->reg.head_mask = VF_ATQH_ATQH_M;
+ break;
+ case IDPF_CTLQ_TYPE_MAILBOX_RX:
+ /* set head and tail registers in our local struct */
+ ccq->reg.head = VF_ARQH;
+ ccq->reg.tail = VF_ARQT;
+ ccq->reg.len = VF_ARQLEN;
+ ccq->reg.bah = VF_ARQBAH;
+ ccq->reg.bal = VF_ARQBAL;
+ ccq->reg.len_mask = VF_ARQLEN_ARQLEN_M;
+ ccq->reg.len_ena_mask = VF_ARQLEN_ARQENABLE_M;
+ ccq->reg.head_mask = VF_ARQH_ARQH_M;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/**
+ * idpf_vf_mb_intr_reg_init - Initialize the mailbox register
+ * @adapter: adapter structure
+ */
+static void idpf_vf_mb_intr_reg_init(struct idpf_adapter *adapter)
+{
+ struct idpf_intr_reg *intr = &adapter->mb_vector.intr_reg;
+ u32 dyn_ctl = le32_to_cpu(adapter->caps.mailbox_dyn_ctl);
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter, dyn_ctl);
+ intr->dyn_ctl_intena_m = VF_INT_DYN_CTL0_INTENA_M;
+ intr->dyn_ctl_itridx_m = VF_INT_DYN_CTL0_ITR_INDX_M;
+ intr->icr_ena = idpf_get_reg_addr(adapter, VF_INT_ICR0_ENA1);
+ intr->icr_ena_ctlq_m = VF_INT_ICR0_ENA1_ADMINQ_M;
+}
+
+/**
+ * idpf_vf_intr_reg_init - Initialize interrupt registers
+ * @vport: virtual port structure
+ */
+static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ int num_vecs = vport->num_q_vectors;
+ struct idpf_vec_regs *reg_vals;
+ int num_regs, i, err = 0;
+ u32 rx_itr, tx_itr;
+ u16 total_vecs;
+
+ total_vecs = idpf_get_reserved_vecs(vport->adapter);
+ reg_vals = kcalloc(total_vecs, sizeof(struct idpf_vec_regs),
+ GFP_KERNEL);
+ if (!reg_vals)
+ return -ENOMEM;
+
+ num_regs = idpf_get_reg_intr_vecs(vport, reg_vals);
+ if (num_regs < num_vecs) {
+ err = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ for (i = 0; i < num_vecs; i++) {
+ struct idpf_q_vector *q_vector = &vport->q_vectors[i];
+ u16 vec_id = vport->q_vector_idxs[i] - IDPF_MBX_Q_VEC;
+ struct idpf_intr_reg *intr = &q_vector->intr_reg;
+ u32 spacing;
+
+ intr->dyn_ctl = idpf_get_reg_addr(adapter,
+ reg_vals[vec_id].dyn_ctl_reg);
+ intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
+ intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+
+ spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
+ IDPF_VF_ITR_IDX_SPACING);
+ rx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_0,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ tx_itr = VF_INT_ITRN_ADDR(VIRTCHNL2_ITR_IDX_1,
+ reg_vals[vec_id].itrn_reg,
+ spacing);
+ intr->rx_itr = idpf_get_reg_addr(adapter, rx_itr);
+ intr->tx_itr = idpf_get_reg_addr(adapter, tx_itr);
+ }
+
+free_reg_vals:
+ kfree(reg_vals);
+
+ return err;
+}
+
+/**
+ * idpf_vf_reset_reg_init - Initialize reset registers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_vf_reset_reg_init(struct idpf_adapter *adapter)
+{
+ adapter->reset_reg.rstat = idpf_get_reg_addr(adapter, VFGEN_RSTAT);
+ adapter->reset_reg.rstat_m = VFGEN_RSTAT_VFR_STATE_M;
+}
+
+/**
+ * idpf_vf_trigger_reset - trigger reset
+ * @adapter: Driver specific private structure
+ * @trig_cause: Reason to trigger a reset
+ */
+static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
+ enum idpf_flags trig_cause)
+{
+ /* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
+ if (trig_cause == IDPF_HR_FUNC_RESET &&
+ !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+}
+
+/**
+ * idpf_vf_reg_ops_init - Initialize register API function pointers
+ * @adapter: Driver specific private structure
+ */
+static void idpf_vf_reg_ops_init(struct idpf_adapter *adapter)
+{
+ adapter->dev_ops.reg_ops.ctlq_reg_init = idpf_vf_ctlq_reg_init;
+ adapter->dev_ops.reg_ops.intr_reg_init = idpf_vf_intr_reg_init;
+ adapter->dev_ops.reg_ops.mb_intr_reg_init = idpf_vf_mb_intr_reg_init;
+ adapter->dev_ops.reg_ops.reset_reg_init = idpf_vf_reset_reg_init;
+ adapter->dev_ops.reg_ops.trigger_reset = idpf_vf_trigger_reset;
+}
+
+/**
+ * idpf_vf_dev_ops_init - Initialize device API function pointers
+ * @adapter: Driver specific private structure
+ */
+void idpf_vf_dev_ops_init(struct idpf_adapter *adapter)
+{
+ idpf_vf_reg_ops_init(adapter);
+}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
new file mode 100644
index 000000000000..2c1b051fdc0d
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -0,0 +1,3798 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2023 Intel Corporation */
+
+#include "idpf.h"
+
+/**
+ * idpf_recv_event_msg - Receive virtchnl event message
+ * @vport: virtual port structure
+ * @ctlq_msg: message to copy from
+ *
+ * Receive virtchnl event message
+ */
+static void idpf_recv_event_msg(struct idpf_vport *vport,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct virtchnl2_event *v2e;
+ bool link_status;
+ u32 event;
+
+ v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
+ event = le32_to_cpu(v2e->event);
+
+ switch (event) {
+ case VIRTCHNL2_EVENT_LINK_CHANGE:
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+ link_status = v2e->link_status;
+
+ if (vport->link_up == link_status)
+ break;
+
+ vport->link_up = link_status;
+ if (np->state == __IDPF_VPORT_UP) {
+ if (vport->link_up) {
+ netif_carrier_on(vport->netdev);
+ netif_tx_start_all_queues(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+ }
+ break;
+ default:
+ dev_err(&vport->adapter->pdev->dev,
+ "Unknown event %d from PF\n", event);
+ break;
+ }
+}
+
+/**
+ * idpf_mb_clean - Reclaim the send mailbox queue entries
+ * @adapter: Driver specific private structure
+ *
+ * Reclaim the send mailbox queue entries to be used to send further messages
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int idpf_mb_clean(struct idpf_adapter *adapter)
+{
+ u16 i, num_q_msg = IDPF_DFLT_MBX_Q_LEN;
+ struct idpf_ctlq_msg **q_msg;
+ struct idpf_dma_mem *dma_mem;
+ int err;
+
+ q_msg = kcalloc(num_q_msg, sizeof(struct idpf_ctlq_msg *), GFP_ATOMIC);
+ if (!q_msg)
+ return -ENOMEM;
+
+ err = idpf_ctlq_clean_sq(adapter->hw.asq, &num_q_msg, q_msg);
+ if (err)
+ goto err_kfree;
+
+ for (i = 0; i < num_q_msg; i++) {
+ if (!q_msg[i])
+ continue;
+ dma_mem = q_msg[i]->ctx.indirect.payload;
+ if (dma_mem)
+ dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
+ dma_mem->va, dma_mem->pa);
+ kfree(q_msg[i]);
+ kfree(dma_mem);
+ }
+
+err_kfree:
+ kfree(q_msg);
+
+ return err;
+}
+
+/**
+ * idpf_send_mb_msg - Send message over mailbox
+ * @adapter: Driver specific private structure
+ * @op: virtchnl opcode
+ * @msg_size: size of the payload
+ * @msg: pointer to buffer holding the payload
+ *
+ * Prepares the control queue message and initiates the send API.
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg)
+{
+ struct idpf_ctlq_msg *ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int err;
+
+ /* If we are here and a reset is detected, nothing much can be
+ * done. This thread should silently abort and is expected to
+ * be corrected by a new run, either by user or driver flows,
+ * after the reset
+ */
+ if (idpf_is_reset_detected(adapter))
+ return 0;
+
+ err = idpf_mb_clean(adapter);
+ if (err)
+ return err;
+
+ ctlq_msg = kzalloc(sizeof(*ctlq_msg), GFP_ATOMIC);
+ if (!ctlq_msg)
+ return -ENOMEM;
+
+ dma_mem = kzalloc(sizeof(*dma_mem), GFP_ATOMIC);
+ if (!dma_mem) {
+ err = -ENOMEM;
+ goto dma_mem_error;
+ }
+
+ ctlq_msg->opcode = idpf_mbq_opc_send_msg_to_cp;
+ ctlq_msg->func_id = 0;
+ ctlq_msg->data_len = msg_size;
+ ctlq_msg->cookie.mbx.chnl_opcode = op;
+ ctlq_msg->cookie.mbx.chnl_retval = 0;
+ dma_mem->size = IDPF_CTLQ_MAX_BUF_LEN;
+ dma_mem->va = dma_alloc_coherent(&adapter->pdev->dev, dma_mem->size,
+ &dma_mem->pa, GFP_ATOMIC);
+ if (!dma_mem->va) {
+ err = -ENOMEM;
+ goto dma_alloc_error;
+ }
+ memcpy(dma_mem->va, msg, msg_size);
+ ctlq_msg->ctx.indirect.payload = dma_mem;
+
+ err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
+ if (err)
+ goto send_error;
+
+ return 0;
+
+send_error:
+ dma_free_coherent(&adapter->pdev->dev, dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+dma_alloc_error:
+ kfree(dma_mem);
+dma_mem_error:
+ kfree(ctlq_msg);
+
+ return err;
+}
+
+/**
+ * idpf_find_vport - Find vport pointer from control queue message
+ * @adapter: driver specific private structure
+ * @vport: address of vport pointer to copy the vport from adapters vport list
+ * @ctlq_msg: control queue message
+ *
+ * Return 0 on success, error value on failure. This function also checks the
+ * opcodes which expect to receive a payload and returns an error value if
+ * that is not the case.
+ */
+static int idpf_find_vport(struct idpf_adapter *adapter,
+ struct idpf_vport **vport,
+ struct idpf_ctlq_msg *ctlq_msg)
+{
+ bool no_op = false, vid_found = false;
+ int i, err = 0;
+ char *vc_msg;
+ u32 v_id;
+
+ vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
+
+ if (ctlq_msg->data_len) {
+ size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+
+ if (!payload_size) {
+ dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
+ kfree(vc_msg);
+
+ return -EINVAL;
+ }
+
+ memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
+ min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
+ }
+
+ switch (ctlq_msg->cookie.mbx.chnl_opcode) {
+ case VIRTCHNL2_OP_VERSION:
+ case VIRTCHNL2_OP_GET_CAPS:
+ case VIRTCHNL2_OP_CREATE_VPORT:
+ case VIRTCHNL2_OP_SET_SRIOV_VFS:
+ case VIRTCHNL2_OP_ALLOC_VECTORS:
+ case VIRTCHNL2_OP_DEALLOC_VECTORS:
+ case VIRTCHNL2_OP_GET_PTYPE_INFO:
+ goto free_vc_msg;
+ case VIRTCHNL2_OP_ENABLE_VPORT:
+ case VIRTCHNL2_OP_DISABLE_VPORT:
+ case VIRTCHNL2_OP_DESTROY_VPORT:
+ v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ case VIRTCHNL2_OP_DEL_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_ADD_QUEUES:
+ v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+ case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+ v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_GET_STATS:
+ v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_LUT:
+ case VIRTCHNL2_OP_SET_RSS_LUT:
+ v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_KEY:
+ case VIRTCHNL2_OP_SET_RSS_KEY:
+ v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_EVENT:
+ v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_LOOPBACK:
+ v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
+ v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
+ break;
+ case VIRTCHNL2_OP_ADD_MAC_ADDR:
+ case VIRTCHNL2_OP_DEL_MAC_ADDR:
+ v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
+ break;
+ default:
+ no_op = true;
+ break;
+ }
+
+ if (no_op)
+ goto free_vc_msg;
+
+ for (i = 0; i < idpf_get_max_vports(adapter); i++) {
+ if (adapter->vport_ids[i] == v_id) {
+ vid_found = true;
+ break;
+ }
+ }
+
+ if (vid_found)
+ *vport = adapter->vports[i];
+ else
+ err = -EINVAL;
+
+free_vc_msg:
+ kfree(vc_msg);
+
+ return err;
+}
+
+/**
+ * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
+ * @adapter: driver specific private structure
+ * @vport: virtual port structure
+ * @ctlq_msg: msg to copy from
+ * @err_enum: err bit to set on error
+ *
+ * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ struct idpf_ctlq_msg *ctlq_msg,
+ enum idpf_vport_vc_state err_enum)
+{
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ if (vport)
+ set_bit(err_enum, vport->vc_state);
+ else
+ set_bit(err_enum, adapter->vc_state);
+
+ return -EINVAL;
+ }
+
+ if (vport)
+ memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
+ min_t(int, ctlq_msg->ctx.indirect.payload->size,
+ IDPF_CTLQ_MAX_BUF_LEN));
+ else
+ memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
+ min_t(int, ctlq_msg->ctx.indirect.payload->size,
+ IDPF_CTLQ_MAX_BUF_LEN));
+
+ return 0;
+}
+
+/**
+ * idpf_recv_vchnl_op - helper function with common logic when handling the
+ * reception of VIRTCHNL OPs.
+ * @adapter: driver specific private structure
+ * @vport: virtual port structure
+ * @ctlq_msg: msg to copy from
+ * @state: state bit used on timeout check
+ * @err_state: err bit to set on error
+ */
+static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ struct idpf_ctlq_msg *ctlq_msg,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_state)
+{
+ wait_queue_head_t *vchnl_wq;
+ int err;
+
+ if (vport)
+ vchnl_wq = &vport->vchnl_wq;
+ else
+ vchnl_wq = &adapter->vchnl_wq;
+
+ err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
+ if (wq_has_sleeper(vchnl_wq)) {
+ if (vport)
+ set_bit(state, vport->vc_state);
+ else
+ set_bit(state, adapter->vc_state);
+
+ wake_up(vchnl_wq);
+ } else {
+ if (!err) {
+ dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ } else {
+ /* Clear the errors since there is no sleeper to pass
+ * them on
+ */
+ if (vport)
+ clear_bit(err_state, vport->vc_state);
+ else
+ clear_bit(err_state, adapter->vc_state);
+ }
+ }
+}
+
+/**
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
+ * @op: virtchannel operation code
+ * @msg: Received message holding buffer
+ * @msg_size: message size
+ *
+ * Receives a control queue message and posts the receive buffer. Returns 0
+ * on success and negative on failure.
+ */
+int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
+ void *msg, int msg_size)
+{
+ struct idpf_vport *vport = NULL;
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ bool work_done = false;
+ int num_retry = 2000;
+ u16 num_q_msg;
+ int err;
+
+ while (1) {
+ struct idpf_vport_config *vport_config;
+ int payload_size = 0;
+
+ /* Try to get one message */
+ num_q_msg = 1;
+ dma_mem = NULL;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
+ /* If no message then decide if we have to retry based on
+ * opcode
+ */
+ if (err || !num_q_msg) {
+ /* num_retry is large to allow for responses that are delayed
+ * when a large number of VF mailbox messages are in flight. If
+ * a mailbox message is received from the other side, we come
+ * out of the sleep cycle immediately; otherwise we keep
+ * waiting.
+ */
+ if (!op || !num_retry--)
+ break;
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
+ err = -EIO;
+ break;
+ }
+ msleep(20);
+ continue;
+ }
+
+ /* If we are here, a message has been received. Check if we are
+ * looking for a specific message based on the opcode. If it is
+ * different, ignore it and post buffers
+ */
+ if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
+ goto post_buffs;
+
+ err = idpf_find_vport(adapter, &vport, &ctlq_msg);
+ if (err)
+ goto post_buffs;
+
+ if (ctlq_msg.data_len)
+ payload_size = ctlq_msg.ctx.indirect.payload->size;
+
+ /* All conditions are met. Either a message requested is
+ * received or we received a message to be processed
+ */
+ switch (ctlq_msg.cookie.mbx.chnl_opcode) {
+ case VIRTCHNL2_OP_VERSION:
+ case VIRTCHNL2_OP_GET_CAPS:
+ if (ctlq_msg.cookie.mbx.chnl_retval) {
+ dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
+ ctlq_msg.cookie.mbx.chnl_opcode,
+ ctlq_msg.cookie.mbx.chnl_retval);
+ err = -EBADMSG;
+ } else if (msg) {
+ memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
+ min_t(int, payload_size, msg_size));
+ }
+ work_done = true;
+ break;
+ case VIRTCHNL2_OP_CREATE_VPORT:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_CREATE_VPORT,
+ IDPF_VC_CREATE_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_ENABLE_VPORT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ENA_VPORT,
+ IDPF_VC_ENA_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_DISABLE_VPORT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DIS_VPORT,
+ IDPF_VC_DIS_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_DESTROY_VPORT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DESTROY_VPORT,
+ IDPF_VC_DESTROY_VPORT_ERR);
+ break;
+ case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_CONFIG_TXQ,
+ IDPF_VC_CONFIG_TXQ_ERR);
+ break;
+ case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_CONFIG_RXQ,
+ IDPF_VC_CONFIG_RXQ_ERR);
+ break;
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ENA_QUEUES,
+ IDPF_VC_ENA_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DIS_QUEUES,
+ IDPF_VC_DIS_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_ADD_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ADD_QUEUES,
+ IDPF_VC_ADD_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_DEL_QUEUES:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DEL_QUEUES,
+ IDPF_VC_DEL_QUEUES_ERR);
+ break;
+ case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_MAP_IRQ,
+ IDPF_VC_MAP_IRQ_ERR);
+ break;
+ case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_UNMAP_IRQ,
+ IDPF_VC_UNMAP_IRQ_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_STATS:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_GET_STATS,
+ IDPF_VC_GET_STATS_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_LUT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_GET_RSS_LUT,
+ IDPF_VC_GET_RSS_LUT_ERR);
+ break;
+ case VIRTCHNL2_OP_SET_RSS_LUT:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_SET_RSS_LUT,
+ IDPF_VC_SET_RSS_LUT_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_RSS_KEY:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_GET_RSS_KEY,
+ IDPF_VC_GET_RSS_KEY_ERR);
+ break;
+ case VIRTCHNL2_OP_SET_RSS_KEY:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_SET_RSS_KEY,
+ IDPF_VC_SET_RSS_KEY_ERR);
+ break;
+ case VIRTCHNL2_OP_SET_SRIOV_VFS:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_SET_SRIOV_VFS,
+ IDPF_VC_SET_SRIOV_VFS_ERR);
+ break;
+ case VIRTCHNL2_OP_ALLOC_VECTORS:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_ALLOC_VECTORS,
+ IDPF_VC_ALLOC_VECTORS_ERR);
+ break;
+ case VIRTCHNL2_OP_DEALLOC_VECTORS:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_DEALLOC_VECTORS,
+ IDPF_VC_DEALLOC_VECTORS_ERR);
+ break;
+ case VIRTCHNL2_OP_GET_PTYPE_INFO:
+ idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
+ IDPF_VC_GET_PTYPE_INFO,
+ IDPF_VC_GET_PTYPE_INFO_ERR);
+ break;
+ case VIRTCHNL2_OP_LOOPBACK:
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_LOOPBACK_STATE,
+ IDPF_VC_LOOPBACK_STATE_ERR);
+ break;
+ case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
+ /* This message can only be sent asynchronously. As
+ * such we'll have lost the context in which it was
+ * called and thus can only really report if it looks
+ * like an error occurred. Don't bother setting ERR bit
+ * or waking chnl_wq since no work queue will be waiting
+ * to read the message.
+ */
+ if (ctlq_msg.cookie.mbx.chnl_retval) {
+ dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
+ ctlq_msg.cookie.mbx.chnl_retval);
+ }
+ break;
+ case VIRTCHNL2_OP_ADD_MAC_ADDR:
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
+ vport_config->flags)) {
+ /* Message was sent asynchronously. We don't
+ * normally print errors here, instead
+ * prefer to handle errors in the function
+ * calling wait_for_event. However, if
+ * asynchronous, the context in which the
+ * message was sent is lost. We can't really do
+ * anything about it at this point, but we
+ * should at a minimum indicate that it looks
+ * like something went wrong. Also don't bother
+ * setting ERR bit or waking vchnl_wq since no
+ * one will be waiting to read the async
+ * message.
+ */
+ if (ctlq_msg.cookie.mbx.chnl_retval)
+ dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
+ ctlq_msg.cookie.mbx.chnl_retval);
+ break;
+ }
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_ADD_MAC_ADDR,
+ IDPF_VC_ADD_MAC_ADDR_ERR);
+ break;
+ case VIRTCHNL2_OP_DEL_MAC_ADDR:
+ vport_config = adapter->vport_config[vport->idx];
+ if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
+ vport_config->flags)) {
+ /* Message was sent asynchronously like the
+ * VIRTCHNL2_OP_ADD_MAC_ADDR
+ */
+ if (ctlq_msg.cookie.mbx.chnl_retval)
+ dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
+ ctlq_msg.cookie.mbx.chnl_retval);
+ break;
+ }
+ idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
+ IDPF_VC_DEL_MAC_ADDR,
+ IDPF_VC_DEL_MAC_ADDR_ERR);
+ break;
+ case VIRTCHNL2_OP_EVENT:
+ idpf_recv_event_msg(vport, &ctlq_msg);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev,
+ "Unhandled virtchnl response %d\n",
+ ctlq_msg.cookie.mbx.chnl_opcode);
+ break;
+ }
+
+post_buffs:
+ if (ctlq_msg.data_len)
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ else
+ num_q_msg = 0;
+
+ err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
+ &num_q_msg, &dma_mem);
+ /* If post failed clear the only buffer we supplied */
+ if (err && dma_mem)
+ dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
+ dma_mem->va, dma_mem->pa);
+
+ /* Applies only if we are looking for a specific opcode */
+ if (work_done)
+ break;
+ }
+
+ return err;
+}
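As a rough bound on the retry loop above: with num_retry = 2000 and a 20 ms sleep per attempt, a caller polling for a specific opcode gives up after roughly 2000 * 20 ms = 40 seconds if no matching message ever arrives.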
+
+/**
+ * __idpf_wait_for_event - wrapper function for wait on virtchannel response
+ * @adapter: Driver private data structure
+ * @vport: virtual port structure
+ * @state: check on state upon timeout
+ * @err_check: check if this specific error bit is set
+ * @timeout: Max time to wait
+ *
+ * Checks if state is set upon expiry of timeout. Returns 0 on success,
+ * negative on failure.
+ */
+static int __idpf_wait_for_event(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_check,
+ int timeout)
+{
+ int time_to_wait, num_waits;
+ wait_queue_head_t *vchnl_wq;
+ unsigned long *vc_state;
+
+ time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
+ num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+
+ if (vport) {
+ vchnl_wq = &vport->vchnl_wq;
+ vc_state = vport->vc_state;
+ } else {
+ vchnl_wq = &adapter->vchnl_wq;
+ vc_state = adapter->vc_state;
+ }
+
+ while (num_waits) {
+ int event;
+
+ /* If we are here and a reset is detected, do not wait but
+ * return. Reset timing is out of the driver's control, so
+ * if the underlying HW mailbox is gone while we are cleaning
+ * resources as part of the reset, waiting on mailbox messages
+ * is not meaningful
+ */
+ if (idpf_is_reset_detected(adapter))
+ return 0;
+
+ event = wait_event_timeout(*vchnl_wq,
+ test_and_clear_bit(state, vc_state),
+ msecs_to_jiffies(time_to_wait));
+ if (event) {
+ if (test_and_clear_bit(err_check, vc_state)) {
+ dev_err(&adapter->pdev->dev, "VC response error %s\n",
+ idpf_vport_vc_state_str[err_check]);
+
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+ num_waits--;
+ }
+
+ /* Timeout occurred */
+ dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
+ idpf_vport_vc_state_str[state]);
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * idpf_min_wait_for_event - wait for virtchannel response
+ * @adapter: Driver private data structure
+ * @vport: virtual port structure
+ * @state: check on state upon timeout
+ * @err_check: check if this specific error bit is set
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_check)
+{
+ return __idpf_wait_for_event(adapter, vport, state, err_check,
+ IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
+}
+
+/**
+ * idpf_wait_for_event - wait for virtchannel response
+ * @adapter: Driver private data structure
+ * @vport: virtual port structure
+ * @state: check on state upon timeout after 500ms
+ * @err_check: check if this specific error bit is set
+ *
+ * Returns 0 on success, negative on failure.
+ */
+static int idpf_wait_for_event(struct idpf_adapter *adapter,
+ struct idpf_vport *vport,
+ enum idpf_vport_vc_state state,
+ enum idpf_vport_vc_state err_check)
+{
+ /* The timeout is increased in the __IDPF_INIT_SW flow to account for a
+ * large number of VF mailbox message responses. When a message is
+ * received on the mailbox, this thread is woken up by idpf_recv_mb_msg
+ * before the timeout expires. Only in the error case, i.e. if no
+ * message is received on the mailbox, do we wait for the complete
+ * timeout, which is unlikely to happen.
+ */
+ return __idpf_wait_for_event(adapter, vport, state, err_check,
+ IDPF_WAIT_FOR_EVENT_TIMEO);
+}
+
+/**
+ * idpf_wait_for_marker_event - wait for software marker response
+ * @vport: virtual port data structure
+ *
+ * Returns 0 on success, negative on failure.
+ **/
+static int idpf_wait_for_marker_event(struct idpf_vport *vport)
+{
+ int event;
+ int i;
+
+ for (i = 0; i < vport->num_txq; i++)
+ set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+
+ event = wait_event_timeout(vport->sw_marker_wq,
+ test_and_clear_bit(IDPF_VPORT_SW_MARKER,
+ vport->flags),
+ msecs_to_jiffies(500));
+
+ for (i = 0; i < vport->num_txq; i++)
+ clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+
+ if (event)
+ return 0;
+
+ dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * idpf_send_ver_msg - send virtchnl version message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl version message. Returns 0 on success, negative on failure.
+ */
+static int idpf_send_ver_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_version_info vvi;
+
+ if (adapter->virt_ver_maj) {
+ vvi.major = cpu_to_le32(adapter->virt_ver_maj);
+ vvi.minor = cpu_to_le32(adapter->virt_ver_min);
+ } else {
+ vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
+ vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
+ }
+
+ return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
+ (u8 *)&vvi);
+}
+
+/**
+ * idpf_recv_ver_msg - Receive virtchnl version message
+ * @adapter: Driver specific private structure
+ *
+ * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
+ * to send version message again, otherwise negative on failure.
+ */
+static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_version_info vvi;
+ u32 major, minor;
+ int err;
+
+ err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
+ sizeof(vvi));
+ if (err)
+ return err;
+
+ major = le32_to_cpu(vvi.major);
+ minor = le32_to_cpu(vvi.minor);
+
+ if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
+ dev_warn(&adapter->pdev->dev,
+ "Virtchnl major version (%d) greater than supported\n",
+ major);
+
+ return -EINVAL;
+ }
+
+ if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
+ minor > IDPF_VIRTCHNL_VERSION_MINOR)
+ dev_warn(&adapter->pdev->dev,
+ "Virtchnl minor version (%d) didn't match\n", minor);
+
+ /* If we have a mismatch, resend version to update receiver on what
+ * version we will use.
+ */
+ if (!adapter->virt_ver_maj &&
+ major != IDPF_VIRTCHNL_VERSION_MAJOR &&
+ minor != IDPF_VIRTCHNL_VERSION_MINOR)
+ err = -EAGAIN;
+
+ adapter->virt_ver_maj = major;
+ adapter->virt_ver_min = minor;
+
+ return err;
+}
+
+/**
+ * idpf_send_get_caps_msg - Send virtchnl get capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Send virtchnl get capabilities message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_get_capabilities caps = { };
+
+ caps.csum_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP |
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP |
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL |
+ VIRTCHNL2_CAP_RX_CSUM_GENERIC);
+
+ caps.seg_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_SEG_IPV4_TCP |
+ VIRTCHNL2_CAP_SEG_IPV4_UDP |
+ VIRTCHNL2_CAP_SEG_IPV4_SCTP |
+ VIRTCHNL2_CAP_SEG_IPV6_TCP |
+ VIRTCHNL2_CAP_SEG_IPV6_UDP |
+ VIRTCHNL2_CAP_SEG_IPV6_SCTP |
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL);
+
+ caps.rss_caps =
+ cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
+ VIRTCHNL2_CAP_RSS_IPV4_UDP |
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP |
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER |
+ VIRTCHNL2_CAP_RSS_IPV6_TCP |
+ VIRTCHNL2_CAP_RSS_IPV6_UDP |
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP |
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER);
+
+ caps.hsplit_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 |
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6);
+
+ caps.rsc_caps =
+ cpu_to_le32(VIRTCHNL2_CAP_RSC_IPV4_TCP |
+ VIRTCHNL2_CAP_RSC_IPV6_TCP);
+
+ caps.other_caps =
+ cpu_to_le64(VIRTCHNL2_CAP_SRIOV |
+ VIRTCHNL2_CAP_MACFILTER |
+ VIRTCHNL2_CAP_SPLITQ_QSCHED |
+ VIRTCHNL2_CAP_PROMISC |
+ VIRTCHNL2_CAP_LOOPBACK);
+
+ return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
+ (u8 *)&caps);
+}
+
+/**
+ * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
+ * @adapter: Driver specific private structure
+ *
+ * Receive virtchnl get capabilities message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
+{
+ return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
+ sizeof(struct virtchnl2_get_capabilities));
+}
+
+/**
+ * idpf_vport_alloc_max_qs - Allocate max queues for a vport
+ * @adapter: Driver specific private structure
+ * @max_q: vport max queue structure
+ */
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
+ struct virtchnl2_get_capabilities *caps = &adapter->caps;
+ u16 default_vports = idpf_get_default_vports(adapter);
+ int max_rx_q, max_tx_q;
+
+ mutex_lock(&adapter->queue_lock);
+
+ max_rx_q = le16_to_cpu(caps->max_rx_q) / default_vports;
+ max_tx_q = le16_to_cpu(caps->max_tx_q) / default_vports;
+ if (adapter->num_alloc_vports < default_vports) {
+ max_q->max_rxq = min_t(u16, max_rx_q, IDPF_MAX_Q);
+ max_q->max_txq = min_t(u16, max_tx_q, IDPF_MAX_Q);
+ } else {
+ max_q->max_rxq = IDPF_MIN_Q;
+ max_q->max_txq = IDPF_MIN_Q;
+ }
+ max_q->max_bufq = max_q->max_rxq * IDPF_MAX_BUFQS_PER_RXQ_GRP;
+ max_q->max_complq = max_q->max_txq;
+
+ if (avail_queues->avail_rxq < max_q->max_rxq ||
+ avail_queues->avail_txq < max_q->max_txq ||
+ avail_queues->avail_bufq < max_q->max_bufq ||
+ avail_queues->avail_complq < max_q->max_complq) {
+ mutex_unlock(&adapter->queue_lock);
+
+ return -EINVAL;
+ }
+
+ avail_queues->avail_rxq -= max_q->max_rxq;
+ avail_queues->avail_txq -= max_q->max_txq;
+ avail_queues->avail_bufq -= max_q->max_bufq;
+ avail_queues->avail_complq -= max_q->max_complq;
+
+ mutex_unlock(&adapter->queue_lock);
+
+ return 0;
+}
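As a worked example of the split above (values illustrative): if the capabilities report max_rx_q = 256 and max_tx_q = 256 with four default vports, each of the first four vports may claim up to min(64, IDPF_MAX_Q) RX and TX queues, plus IDPF_MAX_BUFQS_PER_RXQ_GRP buffer queues per RX queue and one completion queue per TX queue; any vport created beyond the default count falls back to IDPF_MIN_Q.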
+
+/**
+ * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
+ * @adapter: Driver specific private structure
+ * @max_q: vport max queue structure
+ */
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct idpf_avail_queue_info *avail_queues;
+
+ mutex_lock(&adapter->queue_lock);
+ avail_queues = &adapter->avail_queues;
+
+ avail_queues->avail_rxq += max_q->max_rxq;
+ avail_queues->avail_txq += max_q->max_txq;
+ avail_queues->avail_bufq += max_q->max_bufq;
+ avail_queues->avail_complq += max_q->max_complq;
+
+ mutex_unlock(&adapter->queue_lock);
+}
+
+/**
+ * idpf_init_avail_queues - Initialize available queues on the device
+ * @adapter: Driver specific private structure
+ */
+static void idpf_init_avail_queues(struct idpf_adapter *adapter)
+{
+ struct idpf_avail_queue_info *avail_queues = &adapter->avail_queues;
+ struct virtchnl2_get_capabilities *caps = &adapter->caps;
+
+ avail_queues->avail_rxq = le16_to_cpu(caps->max_rx_q);
+ avail_queues->avail_txq = le16_to_cpu(caps->max_tx_q);
+ avail_queues->avail_bufq = le16_to_cpu(caps->max_rx_bufq);
+ avail_queues->avail_complq = le16_to_cpu(caps->max_tx_complq);
+}
+
+/**
+ * idpf_get_reg_intr_vecs - Get vector queue register offset
+ * @vport: virtual port structure
+ * @reg_vals: Register offsets to store in
+ *
+ * Returns number of registers that got populated
+ */
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals)
+{
+ struct virtchnl2_vector_chunks *chunks;
+ struct idpf_vec_regs reg_val;
+ u16 num_vchunks, num_vec;
+ int num_regs = 0, i, j;
+
+ chunks = &vport->adapter->req_vec_chunks->vchunks;
+ num_vchunks = le16_to_cpu(chunks->num_vchunks);
+
+ for (j = 0; j < num_vchunks; j++) {
+ struct virtchnl2_vector_chunk *chunk;
+ u32 dynctl_reg_spacing;
+ u32 itrn_reg_spacing;
+
+ chunk = &chunks->vchunks[j];
+ num_vec = le16_to_cpu(chunk->num_vectors);
+ reg_val.dyn_ctl_reg = le32_to_cpu(chunk->dynctl_reg_start);
+ reg_val.itrn_reg = le32_to_cpu(chunk->itrn_reg_start);
+ reg_val.itrn_index_spacing = le32_to_cpu(chunk->itrn_index_spacing);
+
+ dynctl_reg_spacing = le32_to_cpu(chunk->dynctl_reg_spacing);
+ itrn_reg_spacing = le32_to_cpu(chunk->itrn_reg_spacing);
+
+ for (i = 0; i < num_vec; i++) {
+ reg_vals[num_regs].dyn_ctl_reg = reg_val.dyn_ctl_reg;
+ reg_vals[num_regs].itrn_reg = reg_val.itrn_reg;
+ reg_vals[num_regs].itrn_index_spacing =
+ reg_val.itrn_index_spacing;
+
+ reg_val.dyn_ctl_reg += dynctl_reg_spacing;
+ reg_val.itrn_reg += itrn_reg_spacing;
+ num_regs++;
+ }
+ }
+
+ return num_regs;
+}
+
+/**
+ * idpf_vport_get_q_reg - Get the queue registers for the vport
+ * @reg_vals: register values needing to be set
+ * @num_regs: amount we expect to fill
+ * @q_type: queue model
+ * @chunks: queue regs received over mailbox
+ *
+ * This function parses the queue register offsets for a specific queue type
+ * from the queue register chunk information and stores them into the array
+ * passed as an argument. It returns the actual number of queue registers that
+ * were filled.
+ */
+static int idpf_vport_get_q_reg(u32 *reg_vals, int num_regs, u32 q_type,
+ struct virtchnl2_queue_reg_chunks *chunks)
+{
+ u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ int reg_filled = 0, i;
+ u32 reg_val;
+
+ while (num_chunks--) {
+ struct virtchnl2_queue_reg_chunk *chunk;
+ u16 num_q;
+
+ chunk = &chunks->chunks[num_chunks];
+ if (le32_to_cpu(chunk->type) != q_type)
+ continue;
+
+ num_q = le32_to_cpu(chunk->num_queues);
+ reg_val = le64_to_cpu(chunk->qtail_reg_start);
+ for (i = 0; i < num_q && reg_filled < num_regs ; i++) {
+ reg_vals[reg_filled++] = reg_val;
+ reg_val += le32_to_cpu(chunk->qtail_reg_spacing);
+ }
+ }
+
+ return reg_filled;
+}
+
+/**
+ * __idpf_queue_reg_init - initialize queue registers
+ * @vport: virtual port structure
+ * @reg_vals: registers we are initializing
+ * @num_regs: how many registers there are in total
+ * @q_type: queue model
+ *
+ * Return number of queues that are initialized
+ */
+static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
+ int num_regs, u32 q_type)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_queue *q;
+ int i, j, k = 0;
+
+ switch (q_type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq && k < num_regs; j++, k++)
+ tx_qgrp->txqs[j]->tail =
+ idpf_get_reg_addr(adapter, reg_vals[k]);
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ q = rx_qgrp->singleq.rxqs[j];
+ q->tail = idpf_get_reg_addr(adapter,
+ reg_vals[k]);
+ }
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u8 num_bufqs = vport->num_bufqs_per_qgrp;
+
+ for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ q->tail = idpf_get_reg_addr(adapter,
+ reg_vals[k]);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return k;
+}
+
+/**
+ * idpf_queue_reg_init - initialize queue registers
+ * @vport: virtual port structure
+ *
+ * Return 0 on success, negative on failure
+ */
+int idpf_queue_reg_init(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_params;
+ struct virtchnl2_queue_reg_chunks *chunks;
+ struct idpf_vport_config *vport_config;
+ u16 vport_idx = vport->idx;
+ int num_regs, ret = 0;
+ u32 *reg_vals;
+
+ /* We should never deal with more than 256 queues of the same type */
+ reg_vals = kzalloc(sizeof(void *) * IDPF_LARGE_MAX_Q, GFP_KERNEL);
+ if (!reg_vals)
+ return -ENOMEM;
+
+ vport_config = vport->adapter->vport_config[vport_idx];
+ if (vport_config->req_qs_chunks) {
+ struct virtchnl2_add_queues *vc_aq =
+ (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
+ chunks = &vc_aq->chunks;
+ } else {
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
+ chunks = &vport_params->chunks;
+ }
+
+ /* Initialize Tx queue tail register address */
+ num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
+ VIRTCHNL2_QUEUE_TYPE_TX,
+ chunks);
+ if (num_regs < vport->num_txq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ VIRTCHNL2_QUEUE_TYPE_TX);
+ if (num_regs < vport->num_txq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ /* Initialize Rx/buffer queue tail register address based on Rx queue
+ * model
+ */
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
+ chunks);
+ if (num_regs < vport->num_bufq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
+ if (num_regs < vport->num_bufq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+ } else {
+ num_regs = idpf_vport_get_q_reg(reg_vals, IDPF_LARGE_MAX_Q,
+ VIRTCHNL2_QUEUE_TYPE_RX,
+ chunks);
+ if (num_regs < vport->num_rxq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+
+ num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs,
+ VIRTCHNL2_QUEUE_TYPE_RX);
+ if (num_regs < vport->num_rxq) {
+ ret = -EINVAL;
+ goto free_reg_vals;
+ }
+ }
+
+free_reg_vals:
+ kfree(reg_vals);
+
+ return ret;
+}
+
+/**
+ * idpf_send_create_vport_msg - Send virtchnl create vport message
+ * @adapter: Driver specific private structure
+ * @max_q: vport max queue info
+ *
+ * Send virtchnl create vport message
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q)
+{
+ struct virtchnl2_create_vport *vport_msg;
+ u16 idx = adapter->next_vport;
+ int err, buf_size;
+
+ buf_size = sizeof(struct virtchnl2_create_vport);
+ if (!adapter->vport_params_reqd[idx]) {
+ adapter->vport_params_reqd[idx] = kzalloc(buf_size,
+ GFP_KERNEL);
+ if (!adapter->vport_params_reqd[idx])
+ return -ENOMEM;
+ }
+
+ vport_msg = adapter->vport_params_reqd[idx];
+ vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+ vport_msg->vport_index = cpu_to_le16(idx);
+
+ if (adapter->req_tx_splitq)
+ vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ else
+ vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+
+ if (adapter->req_rx_splitq)
+ vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ else
+ vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
+
+ err = idpf_vport_calc_total_qs(adapter, idx, vport_msg, max_q);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Enough queues are not available");
+
+ return err;
+ }
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
+ (u8 *)vport_msg);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
+ IDPF_VC_CREATE_VPORT_ERR);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
+
+ goto rel_lock;
+ }
+
+ if (!adapter->vport_params_recvd[idx]) {
+ adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
+ GFP_KERNEL);
+ if (!adapter->vport_params_recvd[idx]) {
+ err = -ENOMEM;
+ goto rel_lock;
+ }
+ }
+
+ vport_msg = adapter->vport_params_recvd[idx];
+ memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_check_supported_desc_ids - Verify we have required descriptor support
+ * @vport: virtual port structure
+ *
+ * Return 0 on success, error on failure
+ */
+int idpf_check_supported_desc_ids(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_create_vport *vport_msg;
+ u64 rx_desc_ids, tx_desc_ids;
+
+ vport_msg = adapter->vport_params_recvd[vport->idx];
+
+ rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
+ tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
+
+ if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
+ dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
+ vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ }
+ } else {
+ if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M))
+ vport->base_rxd = true;
+ }
+
+ if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ return 0;
+
+ if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
+ dev_info(&adapter->pdev->dev, "Minimum TX descriptor support not provided, using the default\n");
+ vport_msg->tx_desc_ids = cpu_to_le64(MIN_SUPPORT_TXDID);
+ }
+
+ return 0;
+}
+
+/**
+ * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
+ * @vport: virtual port data structure
+ *
+ * Send virtchnl destroy vport message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport v_id;
+ int err;
+
+ v_id.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
+ sizeof(v_id), (u8 *)&v_id);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
+ IDPF_VC_DESTROY_VPORT_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_enable_vport_msg - Send virtchnl enable vport message
+ * @vport: virtual port data structure
+ *
+ * Send enable vport virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_enable_vport_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport v_id;
+ int err;
+
+ v_id.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
+ sizeof(v_id), (u8 *)&v_id);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
+ IDPF_VC_ENA_VPORT_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_disable_vport_msg - Send virtchnl disable vport message
+ * @vport: virtual port data structure
+ *
+ * Send disable vport virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_disable_vport_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport v_id;
+ int err;
+
+ v_id.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
+ sizeof(v_id), (u8 *)&v_id);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
+ IDPF_VC_DIS_VPORT_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_config_tx_queues_msg - Send virtchnl config tx queues message
+ * @vport: virtual port data structure
+ *
+ * Send config tx queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
+{
+ struct virtchnl2_config_tx_queues *ctq;
+ u32 config_sz, chunk_sz, buf_sz;
+ int totqs, num_msgs, num_chunks;
+ struct virtchnl2_txq_info *qi;
+ int err = 0, i, k = 0;
+
+ totqs = vport->num_txq + vport->num_complq;
+ qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+ int j, sched_mode;
+
+ for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+ qi[k].queue_id =
+ cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+ qi[k].model =
+ cpu_to_le16(vport->txq_model);
+ qi[k].type =
+ cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qi[k].ring_len =
+ cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
+ qi[k].dma_ring_addr =
+ cpu_to_le64(tx_qgrp->txqs[j]->dma);
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ struct idpf_queue *q = tx_qgrp->txqs[j];
+
+ qi[k].tx_compl_queue_id =
+ cpu_to_le16(tx_qgrp->complq->q_id);
+ qi[k].relative_queue_id = cpu_to_le16(j);
+
+ if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
+ qi[k].sched_mode =
+ cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
+ else
+ qi[k].sched_mode =
+ cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ } else {
+ qi[k].sched_mode =
+ cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_QUEUE);
+ }
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ continue;
+
+ qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
+ qi[k].model = cpu_to_le16(vport->txq_model);
+ qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
+ qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
+
+ if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+ sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+ else
+ sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
+ qi[k].sched_mode = cpu_to_le16(sched_mode);
+
+ k++;
+ }
+
+ /* Make sure accounting agrees */
+ if (k != totqs) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Chunk up the queue contexts into multiple messages to avoid
+ * sending a control queue message buffer that is too large
+ */
+ config_sz = sizeof(struct virtchnl2_config_tx_queues);
+ chunk_sz = sizeof(struct virtchnl2_txq_info);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
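+ /* num_chunks is bounded by IDPF_NUM_CHUNKS_PER_MSG(), the number of
+ * txq_info chunks that fit in one mailbox message alongside the
+ * header, so num_msgs messages are needed to cover all queues; the
+ * final message may carry fewer chunks than the others.
+ */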
+
+ buf_sz = struct_size(ctq, qinfo, num_chunks);
+ ctq = kzalloc(buf_sz, GFP_KERNEL);
+ if (!ctq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(ctq, 0, buf_sz);
+ ctq->vport_id = cpu_to_le32(vport->vport_id);
+ ctq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
+
+ err = idpf_send_mb_msg(vport->adapter,
+ VIRTCHNL2_OP_CONFIG_TX_QUEUES,
+ buf_sz, (u8 *)ctq);
+ if (err)
+ goto mbx_error;
+
+ err = idpf_wait_for_event(vport->adapter, vport,
+ IDPF_VC_CONFIG_TXQ,
+ IDPF_VC_CONFIG_TXQ_ERR);
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ totqs -= num_chunks;
+ num_chunks = min(num_chunks, totqs);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(ctq, qinfo, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(ctq);
+error:
+ kfree(qi);
+
+ return err;
+}
+
+/**
+ * idpf_send_config_rx_queues_msg - Send virtchnl config rx queues message
+ * @vport: virtual port data structure
+ *
+ * Send config rx queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
+{
+ struct virtchnl2_config_rx_queues *crq;
+ u32 config_sz, chunk_sz, buf_sz;
+ int totqs, num_msgs, num_chunks;
+ struct virtchnl2_rxq_info *qi;
+ int err = 0, i, k = 0;
+
+ totqs = vport->num_rxq + vport->num_bufq;
+ qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
+ if (!qi)
+ return -ENOMEM;
+
+ /* Populate the queue info buffer with all queue context info */
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+ int j;
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ goto setup_rxqs;
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
+ struct idpf_queue *bufq =
+ &rx_qgrp->splitq.bufq_sets[j].bufq;
+
+ qi[k].queue_id = cpu_to_le32(bufq->q_id);
+ qi[k].model = cpu_to_le16(vport->rxq_model);
+ qi[k].type = cpu_to_le32(bufq->q_type);
+ qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
+ qi[k].ring_len = cpu_to_le16(bufq->desc_count);
+ qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
+ qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
+ qi[k].buffer_notif_stride = bufq->rx_buf_stride;
+ qi[k].rx_buffer_low_watermark =
+ cpu_to_le16(bufq->rx_buffer_low_watermark);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+ }
+
+setup_rxqs:
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, k++) {
+ struct idpf_queue *rxq;
+
+ if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ rxq = rx_qgrp->singleq.rxqs[j];
+ goto common_qi_fields;
+ }
+ rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ qi[k].rx_bufq1_id =
+ cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
+ if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
+ qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
+ qi[k].rx_bufq2_id =
+ cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
+ }
+ qi[k].rx_buffer_low_watermark =
+ cpu_to_le16(rxq->rx_buffer_low_watermark);
+ if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
+ qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
+
+common_qi_fields:
+ if (rxq->rx_hsplit_en) {
+ qi[k].qflags |=
+ cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
+ qi[k].hdr_buffer_size =
+ cpu_to_le16(rxq->rx_hbuf_size);
+ }
+ qi[k].queue_id = cpu_to_le32(rxq->q_id);
+ qi[k].model = cpu_to_le16(vport->rxq_model);
+ qi[k].type = cpu_to_le32(rxq->q_type);
+ qi[k].ring_len = cpu_to_le16(rxq->desc_count);
+ qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
+ qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
+ qi[k].data_buffer_size = cpu_to_le32(rxq->rx_buf_size);
+ qi[k].qflags |=
+ cpu_to_le16(VIRTCHNL2_RX_DESC_SIZE_32BYTE);
+ qi[k].desc_ids = cpu_to_le64(rxq->rxdids);
+ }
+ }
+
+ /* Make sure accounting agrees */
+ if (k != totqs) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Chunk up the queue contexts into multiple messages to avoid
+ * sending a control queue message buffer that is too large
+ */
+ config_sz = sizeof(struct virtchnl2_config_rx_queues);
+ chunk_sz = sizeof(struct virtchnl2_rxq_info);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ totqs);
+ num_msgs = DIV_ROUND_UP(totqs, num_chunks);
+
+ buf_sz = struct_size(crq, qinfo, num_chunks);
+ crq = kzalloc(buf_sz, GFP_KERNEL);
+ if (!crq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(crq, 0, buf_sz);
+ crq->vport_id = cpu_to_le32(vport->vport_id);
+ crq->num_qinfo = cpu_to_le16(num_chunks);
+ memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
+
+ err = idpf_send_mb_msg(vport->adapter,
+ VIRTCHNL2_OP_CONFIG_RX_QUEUES,
+ buf_sz, (u8 *)crq);
+ if (err)
+ goto mbx_error;
+
+ err = idpf_wait_for_event(vport->adapter, vport,
+ IDPF_VC_CONFIG_RXQ,
+ IDPF_VC_CONFIG_RXQ_ERR);
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ totqs -= num_chunks;
+ num_chunks = min(num_chunks, totqs);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(crq, qinfo, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(crq);
+error:
+ kfree(qi);
+
+ return err;
+}
+
+/**
+ * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
+ * queues message
+ * @vport: virtual port data structure
+ * @vc_op: virtchnl op code to send
+ *
+ * Send enable or disable queues virtchnl message. Returns 0 on success,
+ * negative on failure.
+ */
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+{
+ u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq;
+ struct virtchnl2_queue_chunks *qcs;
+ struct virtchnl2_queue_chunk *qc;
+ u32 config_sz, chunk_sz, buf_sz;
+ int i, j, k = 0, err = 0;
+
+ /* validate virtchnl op */
+ switch (vc_op) {
+ case VIRTCHNL2_OP_ENABLE_QUEUES:
+ case VIRTCHNL2_OP_DISABLE_QUEUES:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ num_txq = vport->num_txq + vport->num_complq;
+ num_rxq = vport->num_rxq + vport->num_bufq;
+ num_q = num_txq + num_rxq;
+ buf_sz = sizeof(struct virtchnl2_queue_chunk) * num_q;
+ qc = kzalloc(buf_sz, GFP_KERNEL);
+ if (!qc)
+ return -ENOMEM;
+
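+ /* Fill the chunk array in a fixed order: TX queues, then completion
+ * queues (split model only), RX queues, and finally buffer queues
+ * (split model only), sanity checking the running count after each
+ * group.
+ */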
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+ qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ }
+ if (vport->num_txq != k) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto setup_rx;
+
+ for (i = 0; i < vport->num_txq_grp; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ if (vport->num_complq != (k - vport->num_txq)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+setup_rx:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, k++) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
+ qc[k].start_queue_id =
+ cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
+ qc[k].type =
+ cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
+ } else {
+ qc[k].start_queue_id =
+ cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
+ qc[k].type =
+ cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
+ }
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ goto send_msg;
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+
+ for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
+ struct idpf_queue *q;
+
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ qc[k].type = cpu_to_le32(q->q_type);
+ qc[k].start_queue_id = cpu_to_le32(q->q_id);
+ qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
+ }
+ }
+ if (vport->num_bufq != k - (vport->num_txq +
+ vport->num_complq +
+ vport->num_rxq)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+send_msg:
+ /* Chunk up the queue info into multiple messages */
+ config_sz = sizeof(struct virtchnl2_del_ena_dis_queues);
+ chunk_sz = sizeof(struct virtchnl2_queue_chunk);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ num_q);
+ num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+
+ buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ eq = kzalloc(buf_sz, GFP_KERNEL);
+ if (!eq) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(eq, 0, buf_sz);
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+ qcs = &eq->chunks;
+ memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
+
+ err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
+ if (err)
+ goto mbx_error;
+
+ if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
+ err = idpf_wait_for_event(adapter, vport,
+ IDPF_VC_ENA_QUEUES,
+ IDPF_VC_ENA_QUEUES_ERR);
+ else
+ err = idpf_min_wait_for_event(adapter, vport,
+ IDPF_VC_DIS_QUEUES,
+ IDPF_VC_DIS_QUEUES_ERR);
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ num_q -= num_chunks;
+ num_chunks = min(num_chunks, num_q);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(eq, chunks.chunks, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(eq);
+error:
+ kfree(qc);
+
+ return err;
+}
+
+/**
+ * idpf_send_map_unmap_queue_vector_msg - Send virtchnl map or unmap queue
+ * vector message
+ * @vport: virtual port data structure
+ * @map: true for map and false for unmap
+ *
+ * Send map or unmap queue vector virtchnl message. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_queue_vector_maps *vqvm;
+ struct virtchnl2_queue_vector *vqv;
+ u32 config_sz, chunk_sz, buf_sz;
+ u32 num_msgs, num_chunks, num_q;
+ int i, j, k = 0, err = 0;
+
+ num_q = vport->num_txq + vport->num_rxq;
+
+ buf_sz = sizeof(struct virtchnl2_queue_vector) * num_q;
+ vqv = kzalloc(buf_sz, GFP_KERNEL);
+ if (!vqv)
+ return -ENOMEM;
+
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
+ vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
+
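+ /* In the split queue model TX completions are reported on the
+ * completion queue, so TX queues are mapped to the completion
+ * queue's vector; otherwise each TX queue uses its own vector.
+ */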
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ vqv[k].vector_id =
+ cpu_to_le16(tx_qgrp->complq->q_vector->v_idx);
+ vqv[k].itr_idx =
+ cpu_to_le32(tx_qgrp->complq->q_vector->tx_itr_idx);
+ } else {
+ vqv[k].vector_id =
+ cpu_to_le16(tx_qgrp->txqs[j]->q_vector->v_idx);
+ vqv[k].itr_idx =
+ cpu_to_le32(tx_qgrp->txqs[j]->q_vector->tx_itr_idx);
+ }
+ }
+ }
+
+ if (vport->num_txq != k) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq; j++, k++) {
+ struct idpf_queue *rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ rxq = rx_qgrp->singleq.rxqs[j];
+
+ vqv[k].queue_type = cpu_to_le32(rxq->q_type);
+ vqv[k].queue_id = cpu_to_le32(rxq->q_id);
+ vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
+ vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
+ }
+ }
+
+ if (idpf_is_queue_model_split(vport->txq_model)) {
+ if (vport->num_rxq != k - vport->num_complq) {
+ err = -EINVAL;
+ goto error;
+ }
+ } else {
+ if (vport->num_rxq != k - vport->num_txq) {
+ err = -EINVAL;
+ goto error;
+ }
+ }
+
+ /* Chunk up the vector info into multiple messages */
+ config_sz = sizeof(struct virtchnl2_queue_vector_maps);
+ chunk_sz = sizeof(struct virtchnl2_queue_vector);
+
+ num_chunks = min_t(u32, IDPF_NUM_CHUNKS_PER_MSG(config_sz, chunk_sz),
+ num_q);
+ num_msgs = DIV_ROUND_UP(num_q, num_chunks);
+
+ buf_sz = struct_size(vqvm, qv_maps, num_chunks);
+ vqvm = kzalloc(buf_sz, GFP_KERNEL);
+ if (!vqvm) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ memset(vqvm, 0, buf_sz);
+ vqvm->vport_id = cpu_to_le32(vport->vport_id);
+ vqvm->num_qv_maps = cpu_to_le16(num_chunks);
+ memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
+
+ if (map) {
+ err = idpf_send_mb_msg(adapter,
+ VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
+ buf_sz, (u8 *)vqvm);
+ if (!err)
+ err = idpf_wait_for_event(adapter, vport,
+ IDPF_VC_MAP_IRQ,
+ IDPF_VC_MAP_IRQ_ERR);
+ } else {
+ err = idpf_send_mb_msg(adapter,
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
+ buf_sz, (u8 *)vqvm);
+ if (!err)
+ err =
+ idpf_min_wait_for_event(adapter, vport,
+ IDPF_VC_UNMAP_IRQ,
+ IDPF_VC_UNMAP_IRQ_ERR);
+ }
+ if (err)
+ goto mbx_error;
+
+ k += num_chunks;
+ num_q -= num_chunks;
+ num_chunks = min(num_chunks, num_q);
+ /* Recalculate buffer size */
+ buf_sz = struct_size(vqvm, qv_maps, num_chunks);
+ }
+
+mbx_error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(vqvm);
+error:
+ kfree(vqv);
+
+ return err;
+}
+
+/**
+ * idpf_send_enable_queues_msg - send enable queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send enable queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_enable_queues_msg(struct idpf_vport *vport)
+{
+ return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+}
+
+/**
+ * idpf_send_disable_queues_msg - send disable queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send disable queues virtchnl message. Returns 0 on success, negative
+ * on failure.
+ */
+int idpf_send_disable_queues_msg(struct idpf_vport *vport)
+{
+ int err, i;
+
+ err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ if (err)
+ return err;
+
+ /* switch to poll mode as interrupts will be disabled after disable
+ * queues virtchnl message is sent
+ */
+ for (i = 0; i < vport->num_txq; i++)
+ set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+
+ /* schedule the napi to receive all the marker packets */
+ for (i = 0; i < vport->num_q_vectors; i++)
+ napi_schedule(&vport->q_vectors[i].napi);
+
+ return idpf_wait_for_marker_event(vport);
+}
+
+/**
+ * idpf_convert_reg_to_queue_chunks - Copy queue chunk information to the right
+ * structure
+ * @dchunks: Destination chunks to store data to
+ * @schunks: Source chunks to copy data from
+ * @num_chunks: number of chunks to copy
+ */
+static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchunks,
+ struct virtchnl2_queue_reg_chunk *schunks,
+ u16 num_chunks)
+{
+ u16 i;
+
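+ /* Both source and destination fields are little-endian, so the
+ * values are copied through without byte swapping.
+ */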
+ for (i = 0; i < num_chunks; i++) {
+ dchunks[i].type = schunks[i].type;
+ dchunks[i].start_queue_id = schunks[i].start_queue_id;
+ dchunks[i].num_queues = schunks[i].num_queues;
+ }
+}
+
+/**
+ * idpf_send_delete_queues_msg - send delete queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send delete queues virtchnl message. Return 0 on success, negative on
+ * failure.
+ */
+int idpf_send_delete_queues_msg(struct idpf_vport *vport)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_create_vport *vport_params;
+ struct virtchnl2_queue_reg_chunks *chunks;
+ struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vport_config *vport_config;
+ u16 vport_idx = vport->idx;
+ int buf_size, err;
+ u16 num_chunks;
+
+ vport_config = adapter->vport_config[vport_idx];
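+ /* If queues were added after vport creation, the chunk info
+ * describing them is cached in req_qs_chunks; otherwise use the
+ * chunks from the original create vport response.
+ */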
+ if (vport_config->req_qs_chunks) {
+ struct virtchnl2_add_queues *vc_aq =
+ (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
+ chunks = &vc_aq->chunks;
+ } else {
+ vport_params = adapter->vport_params_recvd[vport_idx];
+ chunks = &vport_params->chunks;
+ }
+
+ num_chunks = le16_to_cpu(chunks->num_chunks);
+ buf_size = struct_size(eq, chunks.chunks, num_chunks);
+
+ eq = kzalloc(buf_size, GFP_KERNEL);
+ if (!eq)
+ return -ENOMEM;
+
+ eq->vport_id = cpu_to_le32(vport->vport_id);
+ eq->chunks.num_chunks = cpu_to_le16(num_chunks);
+
+ idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
+ num_chunks);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
+ buf_size, (u8 *)eq);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
+ IDPF_VC_DEL_QUEUES_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(eq);
+
+ return err;
+}
+
+/**
+ * idpf_send_config_queues_msg - Send config queues virtchnl message
+ * @vport: Virtual port private data structure
+ *
+ * Will send config queues virtchnl message. Returns 0 on success, negative on
+ * failure.
+ */
+int idpf_send_config_queues_msg(struct idpf_vport *vport)
+{
+ int err;
+
+ err = idpf_send_config_tx_queues_msg(vport);
+ if (err)
+ return err;
+
+ return idpf_send_config_rx_queues_msg(vport);
+}
+
+/**
+ * idpf_send_add_queues_msg - Send virtchnl add queues message
+ * @vport: Virtual port private data structure
+ * @num_tx_q: number of transmit queues
+ * @num_complq: number of transmit completion queues
+ * @num_rx_q: number of receive queues
+ * @num_rx_bufq: number of receive buffer queues
+ *
+ * Returns 0 on success, negative on failure. vport _MUST_ be const here as
+ * we should not change any fields within vport itself in this function.
+ */
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_add_queues aq = { };
+ struct virtchnl2_add_queues *vc_msg;
+ u16 vport_idx = vport->idx;
+ int size, err;
+
+ vport_config = adapter->vport_config[vport_idx];
+
+ aq.vport_id = cpu_to_le32(vport->vport_id);
+ aq.num_tx_q = cpu_to_le16(num_tx_q);
+ aq.num_tx_complq = cpu_to_le16(num_complq);
+ aq.num_rx_q = cpu_to_le16(num_rx_q);
+ aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
+
+ mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
+ sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
+ if (err)
+ goto rel_lock;
+
+ /* We want vport to be const to prevent accidental code changes from
+ * modifying the vport config. We're making a special exception here
+ * to discard const so the virtchnl wait helper can be used.
+ */
+ err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
+ IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
+ if (err)
+ goto rel_lock;
+
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
+
+ vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
+ /* compare vc_msg num queues with vport num queues */
+ if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
+ le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
+ le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
+ err = -EINVAL;
+ goto rel_lock;
+ }
+
+ size = struct_size(vc_msg, chunks.chunks,
+ le16_to_cpu(vc_msg->chunks.num_chunks));
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks) {
+ err = -ENOMEM;
+ goto rel_lock;
+ }
+
+rel_lock:
+ mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_alloc_vectors_msg - Send virtchnl alloc vectors message
+ * @adapter: Driver specific private structure
+ * @num_vectors: number of vectors to be allocated
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
+{
+ struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
+ struct virtchnl2_alloc_vectors ac = { };
+ u16 num_vchunks;
+ int size, err;
+
+ ac.num_vectors = cpu_to_le16(num_vectors);
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
+ sizeof(ac), (u8 *)&ac);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
+ IDPF_VC_ALLOC_VECTORS_ERR);
+ if (err)
+ goto rel_lock;
+
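+ /* The granted vector chunks arrive in adapter->vc_msg; keep a
+ * private copy in req_vec_chunks so the later dealloc message can
+ * reuse it.
+ */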
+ rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
+ num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
+
+ size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
+ if (size > sizeof(adapter->vc_msg)) {
+ err = -EINVAL;
+ goto rel_lock;
+ }
+
+ kfree(adapter->req_vec_chunks);
+ adapter->req_vec_chunks = NULL;
+ adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks) {
+ err = -ENOMEM;
+ goto rel_lock;
+ }
+
+ alloc_vec = adapter->req_vec_chunks;
+ if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ kfree(adapter->req_vec_chunks);
+ adapter->req_vec_chunks = NULL;
+ err = -EINVAL;
+ }
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_dealloc_vectors_msg - Send virtchnl deallocate vectors message
+ * @adapter: Driver specific private structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
+{
+ struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
+ struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
+ int buf_size, err;
+
+ buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
+ (u8 *)vcs);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
+ IDPF_VC_DEALLOC_VECTORS_ERR);
+ if (err)
+ goto rel_lock;
+
+ kfree(adapter->req_vec_chunks);
+ adapter->req_vec_chunks = NULL;
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_get_max_vfs - Get max number of vfs supported
+ * @adapter: Driver specific private structure
+ *
+ * Returns max number of VFs
+ */
+static int idpf_get_max_vfs(struct idpf_adapter *adapter)
+{
+ return le16_to_cpu(adapter->caps.max_sriov_vfs);
+}
+
+/**
+ * idpf_send_set_sriov_vfs_msg - Send virtchnl set sriov vfs message
+ * @adapter: Driver specific private structure
+ * @num_vfs: number of virtual functions to be created
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
+{
+ struct virtchnl2_sriov_vfs_info svi = { };
+ int err;
+
+ svi.num_vfs = cpu_to_le16(num_vfs);
+
+ mutex_lock(&adapter->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
+ sizeof(svi), (u8 *)&svi);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
+ IDPF_VC_SET_SRIOV_VFS_ERR);
+
+rel_lock:
+ mutex_unlock(&adapter->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_get_stats_msg - Send virtchnl get statistics message
+ * @vport: vport to get stats for
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_get_stats_msg(struct idpf_vport *vport)
+{
+ struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct rtnl_link_stats64 *netstats = &np->netstats;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_vport_stats stats_msg = { };
+ struct virtchnl2_vport_stats *stats;
+ int err;
+
+ /* Don't send get_stats message if the vport is down */
+ if (np->state <= __IDPF_VPORT_DOWN)
+ return 0;
+
+ stats_msg.vport_id = cpu_to_le32(vport->vport_id);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
+ sizeof(struct virtchnl2_vport_stats),
+ (u8 *)&stats_msg);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
+ IDPF_VC_GET_STATS_ERR);
+ if (err)
+ goto rel_lock;
+
+ stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+
+ spin_lock_bh(&np->stats_lock);
+
+ netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
+ le64_to_cpu(stats->rx_multicast) +
+ le64_to_cpu(stats->rx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
+ netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
+ netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
+ netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
+
+ netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
+ le64_to_cpu(stats->tx_multicast) +
+ le64_to_cpu(stats->tx_broadcast);
+ netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
+ netstats->tx_errors = le64_to_cpu(stats->tx_errors);
+ netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
+
+ vport->port_stats.vport_stats = *stats;
+
+ spin_unlock_bh(&np->stats_lock);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_send_get_set_rss_lut_msg - Send virtchnl get or set rss lut message
+ * @vport: virtual port data structure
+ * @get: flag to set or get rss look up table
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_lut *recv_rl;
+ struct idpf_rss_data *rss_data;
+ struct virtchnl2_rss_lut *rl;
+ int buf_size, lut_buf_size;
+ int i, err;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
+ rl = kzalloc(buf_size, GFP_KERNEL);
+ if (!rl)
+ return -ENOMEM;
+
+ rl->vport_id = cpu_to_le32(vport->vport_id);
+ mutex_lock(&vport->vc_buf_lock);
+
+ if (!get) {
+ rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
+ for (i = 0; i < rss_data->rss_lut_size; i++)
+ rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
+ buf_size, (u8 *)rl);
+ if (err)
+ goto free_mem;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
+ IDPF_VC_SET_RSS_LUT_ERR);
+
+ goto free_mem;
+ }
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
+ buf_size, (u8 *)rl);
+ if (err)
+ goto free_mem;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
+ IDPF_VC_GET_RSS_LUT_ERR);
+ if (err)
+ goto free_mem;
+
+ recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
+ goto do_memcpy;
+
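+ /* The device returned a LUT of a different size than cached; resize
+ * the local copy before copying the new table out of the message.
+ */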
+ rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
+ kfree(rss_data->rss_lut);
+
+ lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
+ rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
+ if (!rss_data->rss_lut) {
+ rss_data->rss_lut_size = 0;
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+do_memcpy:
+ memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
+free_mem:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(rl);
+
+ return err;
+}
+
+/**
+ * idpf_send_get_set_rss_key_msg - Send virtchnl get or set rss key message
+ * @vport: virtual port data structure
+ * @get: flag to get or set RSS hash key
+ *
+ * Returns 0 on success, negative on failure
+ */
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_rss_key *recv_rk;
+ struct idpf_rss_data *rss_data;
+ struct virtchnl2_rss_key *rk;
+ int i, buf_size, err;
+
+ rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
+ rk = kzalloc(buf_size, GFP_KERNEL);
+ if (!rk)
+ return -ENOMEM;
+
+ rk->vport_id = cpu_to_le32(vport->vport_id);
+ mutex_lock(&vport->vc_buf_lock);
+
+ if (get) {
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
+ buf_size, (u8 *)rk);
+ if (err)
+ goto error;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
+ IDPF_VC_GET_RSS_KEY_ERR);
+ if (err)
+ goto error;
+
+ recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
+ if (rss_data->rss_key_size !=
+ le16_to_cpu(recv_rk->key_len)) {
+ rss_data->rss_key_size =
+ min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(rss_data->rss_key_size,
+ GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+ memcpy(rss_data->rss_key, recv_rk->key_flex,
+ rss_data->rss_key_size);
+ } else {
+ rk->key_len = cpu_to_le16(rss_data->rss_key_size);
+ for (i = 0; i < rss_data->rss_key_size; i++)
+ rk->key_flex[i] = rss_data->rss_key[i];
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
+ buf_size, (u8 *)rk);
+ if (err)
+ goto error;
+
+ err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
+ IDPF_VC_SET_RSS_KEY_ERR);
+ }
+
+error:
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(rk);
+
+ return err;
+}
+
+/**
+ * idpf_fill_ptype_lookup - Fill L3 specific fields in ptype lookup table
+ * @ptype: ptype lookup table
+ * @pstate: state machine for ptype lookup table
+ * @ipv4: true for an IPv4 ptype, false for IPv6
+ * @frag: true if the ptype is fragmented
+ *
+ */
+static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+ struct idpf_ptype_state *pstate,
+ bool ipv4, bool frag)
+{
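+ /* While the outer IP layer is not fully described, fill the outer
+ * fields; once it is, a further IP header indicates IP-in-IP
+ * tunneling, so fill the tunnel end fields instead.
+ */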
+ if (!pstate->outer_ip || !pstate->outer_frag) {
+ ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
+ pstate->outer_ip = true;
+
+ if (ipv4)
+ ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ else
+ ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+
+ if (frag) {
+ ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ pstate->outer_frag = true;
+ }
+ } else {
+ ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
+
+ if (ipv4)
+ ptype->tunnel_end_prot =
+ IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ else
+ ptype->tunnel_end_prot =
+ IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+
+ if (frag)
+ ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ }
+}
+
+/**
+ * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
+ * @vport: virtual port data structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
+{
+ struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct virtchnl2_get_ptype_info get_ptype_info;
+ int max_ptype, ptypes_recvd = 0, ptype_offset;
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_get_ptype_info *ptype_info;
+ u16 next_ptype_id = 0;
+ int err = 0, i, j, k;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ max_ptype = IDPF_RX_MAX_PTYPE;
+ else
+ max_ptype = IDPF_RX_MAX_BASE_PTYPE;
+
+ memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+
+ ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!ptype_info)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->vc_buf_lock);
+
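+ /* Request ptype descriptions in windows of at most
+ * IDPF_RX_MAX_PTYPES_PER_BUF entries until max_ptype entries have
+ * been requested or the device returns the end-of-table marker.
+ */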
+ while (next_ptype_id < max_ptype) {
+ get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+
+ if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
+ get_ptype_info.num_ptypes =
+ cpu_to_le16(max_ptype - next_ptype_id);
+ else
+ get_ptype_info.num_ptypes =
+ cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
+ sizeof(struct virtchnl2_get_ptype_info),
+ (u8 *)&get_ptype_info);
+ if (err)
+ goto vc_buf_unlock;
+
+ err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
+ IDPF_VC_GET_PTYPE_INFO_ERR);
+ if (err)
+ goto vc_buf_unlock;
+
+ memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+
+ ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
+ if (ptypes_recvd > max_ptype) {
+ err = -EINVAL;
+ goto vc_buf_unlock;
+ }
+
+ next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
+ le16_to_cpu(get_ptype_info.num_ptypes);
+
+ ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
+
+ for (i = 0; i < le16_to_cpu(ptype_info->num_ptypes); i++) {
+ struct idpf_ptype_state pstate = { };
+ struct virtchnl2_ptype *ptype;
+ u16 id;
+
+ ptype = (struct virtchnl2_ptype *)
+ ((u8 *)ptype_info + ptype_offset);
+
+ ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EINVAL;
+ goto vc_buf_unlock;
+ }
+
+ /* 0xFFFF indicates end of ptypes */
+ if (le16_to_cpu(ptype->ptype_id_10) ==
+ IDPF_INVALID_PTYPE_ID) {
+ err = 0;
+ goto vc_buf_unlock;
+ }
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ k = le16_to_cpu(ptype->ptype_id_10);
+ else
+ k = ptype->ptype_id_8;
+
+ if (ptype->proto_id_count)
+ ptype_lkup[k].known = 1;
+
+ for (j = 0; j < ptype->proto_id_count; j++) {
+ id = le16_to_cpu(ptype->proto_id[j]);
+ switch (id) {
+ case VIRTCHNL2_PROTO_HDR_GRE:
+ if (pstate.tunnel_state ==
+ IDPF_PTYPE_TUNNEL_IP) {
+ ptype_lkup[k].tunnel_type =
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_MAC:
+ ptype_lkup[k].outer_ip =
+ IDPF_RX_PTYPE_OUTER_L2;
+ if (pstate.tunnel_state ==
+ IDPF_TUN_IP_GRE) {
+ ptype_lkup[k].tunnel_type =
+ IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ pstate.tunnel_state |=
+ IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ }
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, true,
+ false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, false,
+ false);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV4_FRAG:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, true,
+ true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_IPV6_FRAG:
+ idpf_fill_ptype_lookup(&ptype_lkup[k],
+ &pstate, false,
+ true);
+ break;
+ case VIRTCHNL2_PROTO_HDR_UDP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_UDP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_TCP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_TCP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_SCTP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMP:
+ ptype_lkup[k].inner_prot =
+ IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ break;
+ case VIRTCHNL2_PROTO_HDR_PAY:
+ ptype_lkup[k].payload_layer =
+ IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ break;
+ case VIRTCHNL2_PROTO_HDR_ICMPV6:
+ case VIRTCHNL2_PROTO_HDR_IPV6_EH:
+ case VIRTCHNL2_PROTO_HDR_PRE_MAC:
+ case VIRTCHNL2_PROTO_HDR_POST_MAC:
+ case VIRTCHNL2_PROTO_HDR_ETHERTYPE:
+ case VIRTCHNL2_PROTO_HDR_SVLAN:
+ case VIRTCHNL2_PROTO_HDR_CVLAN:
+ case VIRTCHNL2_PROTO_HDR_MPLS:
+ case VIRTCHNL2_PROTO_HDR_MMPLS:
+ case VIRTCHNL2_PROTO_HDR_PTP:
+ case VIRTCHNL2_PROTO_HDR_CTRL:
+ case VIRTCHNL2_PROTO_HDR_LLDP:
+ case VIRTCHNL2_PROTO_HDR_ARP:
+ case VIRTCHNL2_PROTO_HDR_ECP:
+ case VIRTCHNL2_PROTO_HDR_EAPOL:
+ case VIRTCHNL2_PROTO_HDR_PPPOD:
+ case VIRTCHNL2_PROTO_HDR_PPPOE:
+ case VIRTCHNL2_PROTO_HDR_IGMP:
+ case VIRTCHNL2_PROTO_HDR_AH:
+ case VIRTCHNL2_PROTO_HDR_ESP:
+ case VIRTCHNL2_PROTO_HDR_IKE:
+ case VIRTCHNL2_PROTO_HDR_NATT_KEEP:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2:
+ case VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL:
+ case VIRTCHNL2_PROTO_HDR_L2TPV3:
+ case VIRTCHNL2_PROTO_HDR_GTP:
+ case VIRTCHNL2_PROTO_HDR_GTP_EH:
+ case VIRTCHNL2_PROTO_HDR_GTPCV2:
+ case VIRTCHNL2_PROTO_HDR_GTPC_TEID:
+ case VIRTCHNL2_PROTO_HDR_GTPU:
+ case VIRTCHNL2_PROTO_HDR_GTPU_UL:
+ case VIRTCHNL2_PROTO_HDR_GTPU_DL:
+ case VIRTCHNL2_PROTO_HDR_ECPRI:
+ case VIRTCHNL2_PROTO_HDR_VRRP:
+ case VIRTCHNL2_PROTO_HDR_OSPF:
+ case VIRTCHNL2_PROTO_HDR_TUN:
+ case VIRTCHNL2_PROTO_HDR_NVGRE:
+ case VIRTCHNL2_PROTO_HDR_VXLAN:
+ case VIRTCHNL2_PROTO_HDR_VXLAN_GPE:
+ case VIRTCHNL2_PROTO_HDR_GENEVE:
+ case VIRTCHNL2_PROTO_HDR_NSH:
+ case VIRTCHNL2_PROTO_HDR_QUIC:
+ case VIRTCHNL2_PROTO_HDR_PFCP:
+ case VIRTCHNL2_PROTO_HDR_PFCP_NODE:
+ case VIRTCHNL2_PROTO_HDR_PFCP_SESSION:
+ case VIRTCHNL2_PROTO_HDR_RTP:
+ case VIRTCHNL2_PROTO_HDR_NO_PROTO:
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+vc_buf_unlock:
+ mutex_unlock(&adapter->vc_buf_lock);
+ kfree(ptype_info);
+
+ return err;
+}
+
+/**
+ * idpf_send_ena_dis_loopback_msg - Send virtchnl enable/disable loopback
+ * message
+ * @vport: virtual port data structure
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
+{
+ struct virtchnl2_loopback loopback;
+ int err;
+
+ loopback.vport_id = cpu_to_le32(vport->vport_id);
+ loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
+
+ mutex_lock(&vport->vc_buf_lock);
+
+ err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
+ sizeof(loopback), (u8 *)&loopback);
+ if (err)
+ goto rel_lock;
+
+ err = idpf_wait_for_event(vport->adapter, vport,
+ IDPF_VC_LOOPBACK_STATE,
+ IDPF_VC_LOOPBACK_STATE_ERR);
+
+rel_lock:
+ mutex_unlock(&vport->vc_buf_lock);
+
+ return err;
+}
+
+/**
+ * idpf_find_ctlq - Given a type and id, find ctlq info
+ * @hw: hardware struct
+ * @type: type of ctrlq to find
+ * @id: ctlq id to find
+ *
+ * Returns pointer to found ctlq info struct, NULL otherwise.
+ */
+static struct idpf_ctlq_info *idpf_find_ctlq(struct idpf_hw *hw,
+ enum idpf_ctlq_type type, int id)
+{
+ struct idpf_ctlq_info *cq, *tmp;
+
+ list_for_each_entry_safe(cq, tmp, &hw->cq_list_head, cq_list)
+ if (cq->q_id == id && cq->cq_type == type)
+ return cq;
+
+ return NULL;
+}
+
+/**
+ * idpf_init_dflt_mbx - Setup default mailbox parameters and make request
+ * @adapter: adapter info struct
+ *
+ * Returns 0 on success, negative otherwise
+ */
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
+{
+ struct idpf_ctlq_create_info ctlq_info[] = {
+ {
+ .type = IDPF_CTLQ_TYPE_MAILBOX_TX,
+ .id = IDPF_DFLT_MBX_ID,
+ .len = IDPF_DFLT_MBX_Q_LEN,
+ .buf_size = IDPF_CTLQ_MAX_BUF_LEN
+ },
+ {
+ .type = IDPF_CTLQ_TYPE_MAILBOX_RX,
+ .id = IDPF_DFLT_MBX_ID,
+ .len = IDPF_DFLT_MBX_Q_LEN,
+ .buf_size = IDPF_CTLQ_MAX_BUF_LEN
+ }
+ };
+ struct idpf_hw *hw = &adapter->hw;
+ int err;
+
+ adapter->dev_ops.reg_ops.ctlq_reg_init(ctlq_info);
+
+ err = idpf_ctlq_init(hw, IDPF_NUM_DFLT_MBX_Q, ctlq_info);
+ if (err)
+ return err;
+
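+ /* Both mailbox queues are created with IDPF_DFLT_MBX_ID, so look
+ * them up by control queue type to recover the send (asq) and
+ * receive (arq) queue handles.
+ */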
+ hw->asq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_TX,
+ IDPF_DFLT_MBX_ID);
+ hw->arq = idpf_find_ctlq(hw, IDPF_CTLQ_TYPE_MAILBOX_RX,
+ IDPF_DFLT_MBX_ID);
+
+ if (!hw->asq || !hw->arq) {
+ idpf_ctlq_deinit(hw);
+
+ return -ENOENT;
+ }
+
+ adapter->state = __IDPF_STARTUP;
+
+ return 0;
+}
+
+/**
+ * idpf_deinit_dflt_mbx - Free up the default mailbox control queues
+ * @adapter: Driver specific private data structure
+ */
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter)
+{
+ if (adapter->hw.arq && adapter->hw.asq) {
+ idpf_mb_clean(adapter);
+ idpf_ctlq_deinit(&adapter->hw);
+ }
+ adapter->hw.arq = NULL;
+ adapter->hw.asq = NULL;
+}
+
+/**
+ * idpf_vport_params_buf_rel - Release memory for MailBox resources
+ * @adapter: Driver specific private data structure
+ *
+ * Will release the memory used to hold the vport parameters received over
+ * the mailbox
+ */
+static void idpf_vport_params_buf_rel(struct idpf_adapter *adapter)
+{
+ kfree(adapter->vport_params_recvd);
+ adapter->vport_params_recvd = NULL;
+ kfree(adapter->vport_params_reqd);
+ adapter->vport_params_reqd = NULL;
+ kfree(adapter->vport_ids);
+ adapter->vport_ids = NULL;
+}
+
+/**
+ * idpf_vport_params_buf_alloc - Allocate memory for MailBox resources
+ * @adapter: Driver specific private data structure
+ *
+ * Will allocate memory to hold the vport parameters received over the
+ * mailbox. Returns 0 on success, negative on failure.
+ */
+static int idpf_vport_params_buf_alloc(struct idpf_adapter *adapter)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+
+ adapter->vport_params_reqd = kcalloc(num_max_vports,
+ sizeof(*adapter->vport_params_reqd),
+ GFP_KERNEL);
+ if (!adapter->vport_params_reqd)
+ return -ENOMEM;
+
+ adapter->vport_params_recvd = kcalloc(num_max_vports,
+ sizeof(*adapter->vport_params_recvd),
+ GFP_KERNEL);
+ if (!adapter->vport_params_recvd)
+ goto err_mem;
+
+ adapter->vport_ids = kcalloc(num_max_vports, sizeof(u32), GFP_KERNEL);
+ if (!adapter->vport_ids)
+ goto err_mem;
+
+ if (adapter->vport_config)
+ return 0;
+
+ adapter->vport_config = kcalloc(num_max_vports,
+ sizeof(*adapter->vport_config),
+ GFP_KERNEL);
+ if (!adapter->vport_config)
+ goto err_mem;
+
+ return 0;
+
+err_mem:
+ idpf_vport_params_buf_rel(adapter);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_vc_core_init - Initialize state machine and get driver specific
+ * resources
+ * @adapter: Driver specific private structure
+ *
+ * This function will initialize the state machine and request all necessary
+ * resources required by the device driver. Once the state machine is
+ * initialized, it allocates memory to store vport specific information and
+ * requests the required interrupts.
+ *
+ * Returns 0 on success, -EAGAIN if the function needs to be called again,
+ * otherwise negative on failure.
+ */
+int idpf_vc_core_init(struct idpf_adapter *adapter)
+{
+ int task_delay = 30;
+ u16 num_max_vports;
+ int err = 0;
+
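+ /* States advance __IDPF_STARTUP -> __IDPF_VER_CHECK ->
+ * __IDPF_GET_CAPS -> __IDPF_INIT_SW; each step sends a mailbox
+ * message and the next iteration, after a short delay, processes
+ * the response.
+ */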
+ while (adapter->state != __IDPF_INIT_SW) {
+ switch (adapter->state) {
+ case __IDPF_STARTUP:
+ if (idpf_send_ver_msg(adapter))
+ goto init_failed;
+ adapter->state = __IDPF_VER_CHECK;
+ goto restart;
+ case __IDPF_VER_CHECK:
+ err = idpf_recv_ver_msg(adapter);
+ if (err == -EIO) {
+ return err;
+ } else if (err == -EAGAIN) {
+ adapter->state = __IDPF_STARTUP;
+ goto restart;
+ } else if (err) {
+ goto init_failed;
+ }
+ if (idpf_send_get_caps_msg(adapter))
+ goto init_failed;
+ adapter->state = __IDPF_GET_CAPS;
+ goto restart;
+ case __IDPF_GET_CAPS:
+ if (idpf_recv_get_caps_msg(adapter))
+ goto init_failed;
+ adapter->state = __IDPF_INIT_SW;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
+ adapter->state);
+ goto init_failed;
+ }
+ break;
+restart:
+ /* Give enough time before proceeding further with
+ * state machine
+ */
+ msleep(task_delay);
+ }
+
+ pci_sriov_set_totalvfs(adapter->pdev, idpf_get_max_vfs(adapter));
+ num_max_vports = idpf_get_max_vports(adapter);
+ adapter->max_vports = num_max_vports;
+ adapter->vports = kcalloc(num_max_vports, sizeof(*adapter->vports),
+ GFP_KERNEL);
+ if (!adapter->vports)
+ return -ENOMEM;
+
+ if (!adapter->netdevs) {
+ adapter->netdevs = kcalloc(num_max_vports,
+ sizeof(struct net_device *),
+ GFP_KERNEL);
+ if (!adapter->netdevs) {
+ err = -ENOMEM;
+ goto err_netdev_alloc;
+ }
+ }
+
+ err = idpf_vport_params_buf_alloc(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n",
+ err);
+ goto err_netdev_alloc;
+ }
+
+ /* Start the mailbox task before requesting vectors. This will ensure
+ * vector information response from mailbox is handled
+ */
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
+ queue_delayed_work(adapter->serv_wq, &adapter->serv_task,
+ msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
+
+ err = idpf_intr_req(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to enable interrupt vectors: %d\n",
+ err);
+ goto err_intr_req;
+ }
+
+ idpf_init_avail_queues(adapter);
+
+ /* Skew the delay for init tasks for each function based on fn number
+ * to prevent every function from making the same call simultaneously.
+ */
+ queue_delayed_work(adapter->init_wq, &adapter->init_task,
+ msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
+
+ goto no_err;
+
+err_intr_req:
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
+ idpf_vport_params_buf_rel(adapter);
+err_netdev_alloc:
+ kfree(adapter->vports);
+ adapter->vports = NULL;
+no_err:
+ return err;
+
+init_failed:
+ /* Don't retry if we're trying to go down, just bail. */
+ if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+ return err;
+
+ if (++adapter->mb_wait_count > IDPF_MB_MAX_ERR) {
+ dev_err(&adapter->pdev->dev, "Failed to establish mailbox communications with hardware\n");
+
+ return -EFAULT;
+ }
+ /* If we reached here, the mailbox queue initialization register writes
+ * might not have taken effect, so retry initializing the mailbox
+ */
+ adapter->state = __IDPF_STARTUP;
+ idpf_deinit_dflt_mbx(adapter);
+ set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
+ queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
+ msecs_to_jiffies(task_delay));
+
+ return -EAGAIN;
+}
+
+/**
+ * idpf_vc_core_deinit - Device deinit routine
+ * @adapter: Driver specific private structure
+ *
+ */
+void idpf_vc_core_deinit(struct idpf_adapter *adapter)
+{
+ int i;
+
+ idpf_deinit_task(adapter);
+ idpf_intr_rel(adapter);
+ /* Set all bits as we don't know which vc_state the vchnl_wq is
+ * waiting on, and wake up the virtchnl workqueue even if it is still
+ * waiting for a response, as we are going down
+ */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ set_bit(i, adapter->vc_state);
+ wake_up(&adapter->vchnl_wq);
+
+ cancel_delayed_work_sync(&adapter->serv_task);
+ cancel_delayed_work_sync(&adapter->mbx_task);
+
+ idpf_vport_params_buf_rel(adapter);
+
+ /* Clear all the bits */
+ for (i = 0; i < IDPF_VC_NBITS; i++)
+ clear_bit(i, adapter->vc_state);
+
+ kfree(adapter->vports);
+ adapter->vports = NULL;
+}
+
+/**
+ * idpf_vport_alloc_vec_indexes - Get relative vector indexes
+ * @vport: virtual port data struct
+ *
+ * This function requests the vector information required for the vport and
+ * stores the vector indexes received from the 'global vector distribution'
+ * in the vport's queue vectors array.
+ *
+ * Return 0 on success, error on failure
+ */
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport)
+{
+ struct idpf_vector_info vec_info;
+ int num_alloc_vecs;
+
+ vec_info.num_curr_vecs = vport->num_q_vectors;
+ vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq);
+ vec_info.default_vport = vport->default_vport;
+ vec_info.index = vport->idx;
+
+ num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter,
+ vport->q_vector_idxs,
+ &vec_info);
+ if (num_alloc_vecs <= 0) {
+ dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n",
+ num_alloc_vecs);
+ return -EINVAL;
+ }
+
+ vport->num_q_vectors = num_alloc_vecs;
+
+ return 0;
+}
+
+/**
+ * idpf_vport_init - Initialize virtual port
+ * @vport: virtual port to be initialized
+ * @max_q: vport max queue info
+ *
+ * Will initialize vport with the info received through MB earlier
+ */
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
+{
+ struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vport_config *vport_config;
+ u16 tx_itr[] = {2, 8, 64, 128, 256};
+ u16 rx_itr[] = {2, 8, 32, 96, 128};
+ struct idpf_rss_data *rss_data;
+ u16 idx = vport->idx;
+
+ vport_config = adapter->vport_config[idx];
+ rss_data = &vport_config->user_config.rss_data;
+ vport_msg = adapter->vport_params_recvd[idx];
+
+ vport_config->max_q.max_txq = max_q->max_txq;
+ vport_config->max_q.max_rxq = max_q->max_rxq;
+ vport_config->max_q.max_complq = max_q->max_complq;
+ vport_config->max_q.max_bufq = max_q->max_bufq;
+
+ vport->txq_model = le16_to_cpu(vport_msg->txq_model);
+ vport->rxq_model = le16_to_cpu(vport_msg->rxq_model);
+ vport->vport_type = le16_to_cpu(vport_msg->vport_type);
+ vport->vport_id = le32_to_cpu(vport_msg->vport_id);
+
+ rss_data->rss_key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(vport_msg->rss_key_size));
+ rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
+
+ ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+
+ /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
+ memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
+ memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS);
+
+ idpf_vport_init_num_qs(vport, vport_msg);
+ idpf_vport_calc_num_q_desc(vport);
+ idpf_vport_calc_num_q_groups(vport);
+ idpf_vport_alloc_vec_indexes(vport);
+
+ vport->crc_enable = adapter->crc_enable;
+}
+
+/**
+ * idpf_get_vec_ids - Initialize vector id from Mailbox parameters
+ * @adapter: adapter structure to get the mailbox vector id
+ * @vecids: Array of vector ids
+ * @num_vecids: number of vector ids
+ * @chunks: vector ids received over mailbox
+ *
+ * Will initialize the mailbox vector id, which is received from the get
+ * capabilities response, and the data queue vector ids with the ids
+ * received as mailbox parameters.
+ * Returns number of ids filled
+ */
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks)
+{
+ u16 num_chunks = le16_to_cpu(chunks->num_vchunks);
+ int num_vecid_filled = 0;
+ int i, j;
+
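+ /* Entry 0 is always the mailbox interrupt vector; the data queue
+ * vectors from the chunks follow it.
+ */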
+ vecids[num_vecid_filled] = adapter->mb_vector.v_idx;
+ num_vecid_filled++;
+
+ for (j = 0; j < num_chunks; j++) {
+ struct virtchnl2_vector_chunk *chunk;
+ u16 start_vecid, num_vec;
+
+ chunk = &chunks->vchunks[j];
+ num_vec = le16_to_cpu(chunk->num_vectors);
+ start_vecid = le16_to_cpu(chunk->start_vector_id);
+
+ for (i = 0; i < num_vec; i++) {
+ if ((num_vecid_filled + i) < num_vecids) {
+ vecids[num_vecid_filled + i] = start_vecid;
+ start_vecid++;
+ } else {
+ break;
+ }
+ }
+ num_vecid_filled = num_vecid_filled + i;
+ }
+
+ return num_vecid_filled;
+}
+
+/**
+ * idpf_vport_get_queue_ids - Initialize queue id from Mailbox parameters
+ * @qids: Array of queue ids
+ * @num_qids: number of queue ids
+ * @q_type: type of queue to look for
+ * @chunks: queue ids received over mailbox
+ *
+ * Will initialize all queue ids with ids received as mailbox parameters
+ * Returns number of ids filled
+ */
+static int idpf_vport_get_queue_ids(u32 *qids, int num_qids, u16 q_type,
+ struct virtchnl2_queue_reg_chunks *chunks)
+{
+ u16 num_chunks = le16_to_cpu(chunks->num_chunks);
+ u32 num_q_id_filled = 0, i;
+ u32 start_q_id, num_q;
+
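+ /* Walk the chunks (last to first), expanding each matching chunk's
+ * contiguous queue id range until num_qids entries are filled.
+ */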
+ while (num_chunks--) {
+ struct virtchnl2_queue_reg_chunk *chunk;
+
+ chunk = &chunks->chunks[num_chunks];
+ if (le32_to_cpu(chunk->type) != q_type)
+ continue;
+
+ num_q = le32_to_cpu(chunk->num_queues);
+ start_q_id = le32_to_cpu(chunk->start_queue_id);
+
+ for (i = 0; i < num_q; i++) {
+ if ((num_q_id_filled + i) < num_qids) {
+ qids[num_q_id_filled + i] = start_q_id;
+ start_q_id++;
+ } else {
+ break;
+ }
+ }
+ num_q_id_filled = num_q_id_filled + i;
+ }
+
+ return num_q_id_filled;
+}
+
+/**
+ * __idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
+ * @vport: virtual port for which the queues ids are initialized
+ * @qids: queue ids
+ * @num_qids: number of queue ids
+ * @q_type: type of queue
+ *
+ * Will initialize all queue ids with ids received as mailbox
+ * parameters. Returns number of queue ids initialized.
+ */
+static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
+ const u32 *qids,
+ int num_qids,
+ u32 q_type)
+{
+ struct idpf_queue *q;
+ int i, j, k = 0;
+
+ switch (q_type) {
+ case VIRTCHNL2_QUEUE_TYPE_TX:
+ for (i = 0; i < vport->num_txq_grp; i++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ tx_qgrp->txqs[j]->q_id = qids[k];
+ tx_qgrp->txqs[j]->q_type =
+ VIRTCHNL2_QUEUE_TYPE_TX;
+ }
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u16 num_rxq;
+
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ num_rxq = rx_qgrp->splitq.num_rxq_sets;
+ else
+ num_rxq = rx_qgrp->singleq.num_rxq;
+
+ for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ if (idpf_is_queue_model_split(vport->rxq_model))
+ q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
+ else
+ q = rx_qgrp->singleq.rxqs[j];
+ q->q_id = qids[k];
+ q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
+ }
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+ for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) {
+ struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
+
+ tx_qgrp->complq->q_id = qids[k];
+ tx_qgrp->complq->q_type =
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ }
+ break;
+ case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+ for (i = 0; i < vport->num_rxq_grp; i++) {
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u8 num_bufqs = vport->num_bufqs_per_qgrp;
+
+ for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ q = &rx_qgrp->splitq.bufq_sets[j].bufq;
+ q->q_id = qids[k];
+ q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return k;
+}
+
+/**
+ * idpf_vport_queue_ids_init - Initialize queue ids from Mailbox parameters
+ * @vport: virtual port for which the queues ids are initialized
+ *
+ * Will initialize all queue ids with ids received as mailbox parameters.
+ * Returns 0 on success, negative if all the queues are not initialized.
+ */
+int idpf_vport_queue_ids_init(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_params;
+ struct virtchnl2_queue_reg_chunks *chunks;
+ struct idpf_vport_config *vport_config;
+ u16 vport_idx = vport->idx;
+ int num_ids, err = 0;
+ u16 q_type;
+ u32 *qids;
+
+ vport_config = vport->adapter->vport_config[vport_idx];
+ if (vport_config->req_qs_chunks) {
+ struct virtchnl2_add_queues *vc_aq =
+ (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
+ chunks = &vc_aq->chunks;
+ } else {
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
+ chunks = &vport_params->chunks;
+ }
+
+ qids = kcalloc(IDPF_MAX_QIDS, sizeof(u32), GFP_KERNEL);
+ if (!qids)
+ return -ENOMEM;
+
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
+ VIRTCHNL2_QUEUE_TYPE_TX,
+ chunks);
+ if (num_ids < vport->num_txq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ VIRTCHNL2_QUEUE_TYPE_TX);
+ if (num_ids < vport->num_txq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS,
+ VIRTCHNL2_QUEUE_TYPE_RX,
+ chunks);
+ if (num_ids < vport->num_rxq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids,
+ VIRTCHNL2_QUEUE_TYPE_RX);
+ if (num_ids < vport->num_rxq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto check_rxq;
+
+ q_type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
+ if (num_ids < vport->num_complq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
+ if (num_ids < vport->num_complq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+
+check_rxq:
+ if (!idpf_is_queue_model_split(vport->rxq_model))
+ goto mem_rel;
+
+ q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+ num_ids = idpf_vport_get_queue_ids(qids, IDPF_MAX_QIDS, q_type, chunks);
+ if (num_ids < vport->num_bufq) {
+ err = -EINVAL;
+ goto mem_rel;
+ }
+ num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type);
+ if (num_ids < vport->num_bufq)
+ err = -EINVAL;
+
+mem_rel:
+ kfree(qids);
+
+ return err;
+}
+
+/**
+ * idpf_vport_adjust_qs - Adjust to new requested queues
+ * @vport: virtual port data struct
+ *
+ * Renegotiate queues. Returns 0 on success, negative on failure.
+ */
+int idpf_vport_adjust_qs(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport vport_msg;
+ int err;
+
+ vport_msg.txq_model = cpu_to_le16(vport->txq_model);
+ vport_msg.rxq_model = cpu_to_le16(vport->rxq_model);
+ err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg,
+ NULL);
+ if (err)
+ return err;
+
+ idpf_vport_init_num_qs(vport, &vport_msg);
+ idpf_vport_calc_num_q_groups(vport);
+
+ return 0;
+}
+
+/**
+ * idpf_is_capability_ena - Default implementation of capability checking
+ * @adapter: Private data struct
+ * @all: when true, require all @flag bits to be set; when false, any bit
+ * @field: caps field to check for flags
+ * @flag: flag to check
+ *
+ * Return true if the requested capability flags are enabled, false otherwise
+ */
+bool idpf_is_capability_ena(struct idpf_adapter *adapter, bool all,
+ enum idpf_cap_field field, u64 flag)
+{
+ u8 *caps = (u8 *)&adapter->caps;
+ u32 *cap_field;
+
+ if (!caps)
+ return false;
+
+ if (field == IDPF_BASE_CAPS)
+ return false;
+
+ cap_field = (u32 *)(caps + field);
+
+ if (all)
+ return (*cap_field & flag) == flag;
+ else
+ return !!(*cap_field & flag);
+}
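+
+/* Usage sketch (illustrative only; IDPF_RSS_CAPS is assumed here as the
+ * enum idpf_cap_field selector for the rss_caps word):
+ *
+ *	bool both = idpf_is_capability_ena(adapter, true, IDPF_RSS_CAPS,
+ *					   VIRTCHNL2_CAP_RSS_IPV4_TCP |
+ *					   VIRTCHNL2_CAP_RSS_IPV4_UDP);
+ *	bool any = idpf_is_capability_ena(adapter, false, IDPF_RSS_CAPS,
+ *					  VIRTCHNL2_CAP_RSS_IPV4_TCP |
+ *					  VIRTCHNL2_CAP_RSS_IPV4_UDP);
+ *
+ * 'both' is true only when TCP and UDP IPv4 RSS are both granted, while
+ * 'any' is true if at least one of them is.
+ */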
+
+/**
+ * idpf_get_vport_id - Get vport id
+ * @vport: virtual port structure
+ *
+ * Return vport id from the adapter persistent data
+ */
+u32 idpf_get_vport_id(struct idpf_vport *vport)
+{
+ struct virtchnl2_create_vport *vport_msg;
+
+ vport_msg = vport->adapter->vport_params_recvd[vport->idx];
+
+ return le32_to_cpu(vport_msg->vport_id);
+}
+
+/**
+ * idpf_add_del_mac_filters - Add/del mac filters
+ * @vport: Virtual port data structure
+ * @np: Netdev private structure
+ * @add: Add or delete flag
+ * @async: Don't wait for return message
+ *
+ * Returns 0 on success, error on failure.
+ */
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async)
+{
+ struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vport_config *vport_config;
+ enum idpf_vport_config_flags mac_flag;
+ struct pci_dev *pdev = adapter->pdev;
+ enum idpf_vport_vc_state vc, vc_err;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ u32 num_msgs, total_filters = 0;
+ int i = 0, k, err = 0;
+ u32 vop;
+
+ vport_config = adapter->vport_config[np->vport_idx];
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+
+ /* Find the number of newly added filters */
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
+ if (add && f->add)
+ total_filters++;
+ else if (!add && f->remove)
+ total_filters++;
+ }
+
+ if (!total_filters) {
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ return 0;
+ }
+
+ /* Fill all the new filters into virtchannel message */
+ mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
+ GFP_ATOMIC);
+ if (!mac_addr) {
+ err = -ENOMEM;
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ goto error;
+ }
+
+ list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
+ list) {
+ if (add && f->add) {
+ ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ i++;
+ f->add = false;
+ if (i == total_filters)
+ break;
+ }
+ if (!add && f->remove) {
+ ether_addr_copy(mac_addr[i].addr, f->macaddr);
+ i++;
+ f->remove = false;
+ if (i == total_filters)
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+
+ if (add) {
+ vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
+ vc = IDPF_VC_ADD_MAC_ADDR;
+ vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
+ mac_flag = IDPF_VPORT_ADD_MAC_REQ;
+ } else {
+ vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
+ vc = IDPF_VC_DEL_MAC_ADDR;
+ vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
+ mac_flag = IDPF_VPORT_DEL_MAC_REQ;
+ }
+
+ /* Chunk up the filters into multiple messages to avoid
+ * sending a control queue message buffer that is too large
+ */
+ num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
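+ /* For example, if IDPF_NUM_FILTERS_PER_MSG were 20, a batch of 45 pending
+ * filters would go out as three messages carrying 20, 20 and 5 entries
+ * respectively.
+ */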
+
+ if (!async)
+ mutex_lock(&vport->vc_buf_lock);
+
+ for (i = 0, k = 0; i < num_msgs; i++) {
+ u32 entries_size, buf_size, num_entries;
+
+ num_entries = min_t(u32, total_filters,
+ IDPF_NUM_FILTERS_PER_MSG);
+ entries_size = sizeof(struct virtchnl2_mac_addr) * num_entries;
+ buf_size = struct_size(ma_list, mac_addr_list, num_entries);
+
+ if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
+ kfree(ma_list);
+ ma_list = kzalloc(buf_size, GFP_ATOMIC);
+ if (!ma_list) {
+ err = -ENOMEM;
+ goto list_prep_error;
+ }
+ } else {
+ memset(ma_list, 0, buf_size);
+ }
+
+ ma_list->vport_id = cpu_to_le32(np->vport_id);
+ ma_list->num_mac_addr = cpu_to_le16(num_entries);
+ memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
+
+ if (async)
+ set_bit(mac_flag, vport_config->flags);
+
+ err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
+ if (err)
+ goto mbx_error;
+
+ if (!async) {
+ err = idpf_wait_for_event(adapter, vport, vc, vc_err);
+ if (err)
+ goto mbx_error;
+ }
+
+ k += num_entries;
+ total_filters -= num_entries;
+ }
+
+mbx_error:
+ if (!async)
+ mutex_unlock(&vport->vc_buf_lock);
+ kfree(ma_list);
+list_prep_error:
+ kfree(mac_addr);
+error:
+ if (err)
+ dev_err(&pdev->dev, "Failed to add or del mac filters %d\n", err);
+
+ return err;
+}
+
+/**
+ * idpf_set_promiscuous - set promiscuous and send message to mailbox
+ * @adapter: Driver specific private structure
+ * @config_data: Vport specific config data
+ * @vport_id: Vport identifier
+ *
+ * Request to configure promiscuous mode on the vport. The message is sent
+ * asynchronously and does not wait for a response. Returns 0 on success,
+ * negative on failure.
+ */
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id)
+{
+ struct virtchnl2_promisc_info vpi;
+ u16 flags = 0;
+ int err;
+
+ if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
+ flags |= VIRTCHNL2_UNICAST_PROMISC;
+ if (test_bit(__IDPF_PROMISC_MC, config_data->user_flags))
+ flags |= VIRTCHNL2_MULTICAST_PROMISC;
+
+ vpi.vport_id = cpu_to_le32(vport_id);
+ vpi.flags = cpu_to_le16(flags);
+
+ err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
+ sizeof(struct virtchnl2_promisc_info),
+ (u8 *)&vpi);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
new file mode 100644
index 000000000000..07e72c72d156
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -0,0 +1,1273 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _VIRTCHNL2_H_
+#define _VIRTCHNL2_H_
+
+/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
+ * VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
+ * and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
+ *
+ * PF/VF uses the virtchnl2 interface defined in this header file to communicate
+ * with device Control Plane (CP). Driver and the CP may run on different
+ * platforms with different endianness. To avoid byte order discrepancies,
+ * all the structures in this header follow little-endian format.
+ *
+ * This is an interface definition file where existing enums and their values
+ * must remain unchanged over time, so we specify explicit values for all enums.
+ */
+
+#include "virtchnl2_lan_desc.h"
+
+/* This macro is used to generate compilation errors if a structure
+ * is not exactly the correct length.
+ */
+#define VIRTCHNL2_CHECK_STRUCT_LEN(n, X) \
+ static_assert((n) == sizeof(struct X))
+
+/* A new major set of opcodes is introduced, leaving room for old misc
+ * opcodes to be added in the future. These opcodes may only be used if
+ * both sides (PF/VF and CP) have successfully negotiated the VIRTCHNL
+ * version as 2.0 during the VIRTCHNL2_OP_VERSION exchange.
+ */
+enum virtchnl2_op {
+ VIRTCHNL2_OP_UNKNOWN = 0,
+ VIRTCHNL2_OP_VERSION = 1,
+ VIRTCHNL2_OP_GET_CAPS = 500,
+ VIRTCHNL2_OP_CREATE_VPORT = 501,
+ VIRTCHNL2_OP_DESTROY_VPORT = 502,
+ VIRTCHNL2_OP_ENABLE_VPORT = 503,
+ VIRTCHNL2_OP_DISABLE_VPORT = 504,
+ VIRTCHNL2_OP_CONFIG_TX_QUEUES = 505,
+ VIRTCHNL2_OP_CONFIG_RX_QUEUES = 506,
+ VIRTCHNL2_OP_ENABLE_QUEUES = 507,
+ VIRTCHNL2_OP_DISABLE_QUEUES = 508,
+ VIRTCHNL2_OP_ADD_QUEUES = 509,
+ VIRTCHNL2_OP_DEL_QUEUES = 510,
+ VIRTCHNL2_OP_MAP_QUEUE_VECTOR = 511,
+ VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR = 512,
+ VIRTCHNL2_OP_GET_RSS_KEY = 513,
+ VIRTCHNL2_OP_SET_RSS_KEY = 514,
+ VIRTCHNL2_OP_GET_RSS_LUT = 515,
+ VIRTCHNL2_OP_SET_RSS_LUT = 516,
+ VIRTCHNL2_OP_GET_RSS_HASH = 517,
+ VIRTCHNL2_OP_SET_RSS_HASH = 518,
+ VIRTCHNL2_OP_SET_SRIOV_VFS = 519,
+ VIRTCHNL2_OP_ALLOC_VECTORS = 520,
+ VIRTCHNL2_OP_DEALLOC_VECTORS = 521,
+ VIRTCHNL2_OP_EVENT = 522,
+ VIRTCHNL2_OP_GET_STATS = 523,
+ VIRTCHNL2_OP_RESET_VF = 524,
+ VIRTCHNL2_OP_GET_EDT_CAPS = 525,
+ VIRTCHNL2_OP_GET_PTYPE_INFO = 526,
+ /* Opcode 527 and 528 are reserved for VIRTCHNL2_OP_GET_PTYPE_ID and
+ * VIRTCHNL2_OP_GET_PTYPE_INFO_RAW.
+ * Opcodes 529, 530, 531, 532 and 533 are reserved.
+ */
+ VIRTCHNL2_OP_LOOPBACK = 534,
+ VIRTCHNL2_OP_ADD_MAC_ADDR = 535,
+ VIRTCHNL2_OP_DEL_MAC_ADDR = 536,
+ VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE = 537,
+};
+
+/**
+ * enum virtchnl2_vport_type - Type of virtual port.
+ * @VIRTCHNL2_VPORT_TYPE_DEFAULT: Default virtual port type.
+ */
+enum virtchnl2_vport_type {
+ VIRTCHNL2_VPORT_TYPE_DEFAULT = 0,
+};
+
+/**
+ * enum virtchnl2_queue_model - Type of queue model.
+ * @VIRTCHNL2_QUEUE_MODEL_SINGLE: Single queue model.
+ * @VIRTCHNL2_QUEUE_MODEL_SPLIT: Split queue model.
+ *
+ * In the single queue model, the same transmit descriptor queue is used by
+ * software to post descriptors to hardware and by hardware to post completed
+ * descriptors to software.
+ * Likewise, the same receive descriptor queue is used by hardware to post
+ * completions to software and by software to post buffers to hardware.
+ *
+ * In the split queue model, hardware uses transmit completion queues to post
+ * descriptor/buffer completions to software, while software uses transmit
+ * descriptor queues to post descriptors to hardware.
+ * Likewise, hardware posts descriptor completions to the receive descriptor
+ * queue, while software uses receive buffer queues to post buffers to hardware.
+ */
+enum virtchnl2_queue_model {
+ VIRTCHNL2_QUEUE_MODEL_SINGLE = 0,
+ VIRTCHNL2_QUEUE_MODEL_SPLIT = 1,
+};
+
+/* Checksum offload capability flags */
+enum virtchnl2_cap_txrx_csum {
+ VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 = BIT(0),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP = BIT(1),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP = BIT(2),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP = BIT(3),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP = BIT(4),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP = BIT(5),
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_CAP_TX_CSUM_GENERIC = BIT(7),
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4 = BIT(8),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP = BIT(9),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP = BIT(10),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP = BIT(11),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP = BIT(12),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP = BIT(13),
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP = BIT(14),
+ VIRTCHNL2_CAP_RX_CSUM_GENERIC = BIT(15),
+ VIRTCHNL2_CAP_TX_CSUM_L3_SINGLE_TUNNEL = BIT(16),
+ VIRTCHNL2_CAP_TX_CSUM_L3_DOUBLE_TUNNEL = BIT(17),
+ VIRTCHNL2_CAP_RX_CSUM_L3_SINGLE_TUNNEL = BIT(18),
+ VIRTCHNL2_CAP_RX_CSUM_L3_DOUBLE_TUNNEL = BIT(19),
+ VIRTCHNL2_CAP_TX_CSUM_L4_SINGLE_TUNNEL = BIT(20),
+ VIRTCHNL2_CAP_TX_CSUM_L4_DOUBLE_TUNNEL = BIT(21),
+ VIRTCHNL2_CAP_RX_CSUM_L4_SINGLE_TUNNEL = BIT(22),
+ VIRTCHNL2_CAP_RX_CSUM_L4_DOUBLE_TUNNEL = BIT(23),
+};
+
+/* Segmentation offload capability flags */
+enum virtchnl2_cap_seg {
+ VIRTCHNL2_CAP_SEG_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_SEG_IPV4_UDP = BIT(1),
+ VIRTCHNL2_CAP_SEG_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_CAP_SEG_IPV6_TCP = BIT(3),
+ VIRTCHNL2_CAP_SEG_IPV6_UDP = BIT(4),
+ VIRTCHNL2_CAP_SEG_IPV6_SCTP = BIT(5),
+ VIRTCHNL2_CAP_SEG_GENERIC = BIT(6),
+ VIRTCHNL2_CAP_SEG_TX_SINGLE_TUNNEL = BIT(7),
+ VIRTCHNL2_CAP_SEG_TX_DOUBLE_TUNNEL = BIT(8),
+};
+
+/* Receive Side Scaling Flow type capability flags */
+enum virtchnl2_cap_rss {
+ VIRTCHNL2_CAP_RSS_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_RSS_IPV4_UDP = BIT(1),
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP = BIT(2),
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER = BIT(3),
+ VIRTCHNL2_CAP_RSS_IPV6_TCP = BIT(4),
+ VIRTCHNL2_CAP_RSS_IPV6_UDP = BIT(5),
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP = BIT(6),
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER = BIT(7),
+ VIRTCHNL2_CAP_RSS_IPV4_AH = BIT(8),
+ VIRTCHNL2_CAP_RSS_IPV4_ESP = BIT(9),
+ VIRTCHNL2_CAP_RSS_IPV4_AH_ESP = BIT(10),
+ VIRTCHNL2_CAP_RSS_IPV6_AH = BIT(11),
+ VIRTCHNL2_CAP_RSS_IPV6_ESP = BIT(12),
+ VIRTCHNL2_CAP_RSS_IPV6_AH_ESP = BIT(13),
+};
+
+/* Header split capability flags */
+enum virtchnl2_cap_rx_hsplit_at {
+ /* for prepended metadata */
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L2 = BIT(0),
+ /* all VLANs go into header buffer */
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L3 = BIT(1),
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4 = BIT(2),
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6 = BIT(3),
+};
+
+/* Receive Side Coalescing offload capability flags */
+enum virtchnl2_cap_rsc {
+ VIRTCHNL2_CAP_RSC_IPV4_TCP = BIT(0),
+ VIRTCHNL2_CAP_RSC_IPV4_SCTP = BIT(1),
+ VIRTCHNL2_CAP_RSC_IPV6_TCP = BIT(2),
+ VIRTCHNL2_CAP_RSC_IPV6_SCTP = BIT(3),
+};
+
+/* Other capability flags */
+enum virtchnl2_cap_other {
+ VIRTCHNL2_CAP_RDMA = BIT_ULL(0),
+ VIRTCHNL2_CAP_SRIOV = BIT_ULL(1),
+ VIRTCHNL2_CAP_MACFILTER = BIT_ULL(2),
+ VIRTCHNL2_CAP_FLOW_DIRECTOR = BIT_ULL(3),
+ /* Queue based scheduling using split queue model */
+ VIRTCHNL2_CAP_SPLITQ_QSCHED = BIT_ULL(4),
+ VIRTCHNL2_CAP_CRC = BIT_ULL(5),
+ VIRTCHNL2_CAP_ADQ = BIT_ULL(6),
+ VIRTCHNL2_CAP_WB_ON_ITR = BIT_ULL(7),
+ VIRTCHNL2_CAP_PROMISC = BIT_ULL(8),
+ VIRTCHNL2_CAP_LINK_SPEED = BIT_ULL(9),
+ VIRTCHNL2_CAP_INLINE_IPSEC = BIT_ULL(10),
+ VIRTCHNL2_CAP_LARGE_NUM_QUEUES = BIT_ULL(11),
+ VIRTCHNL2_CAP_VLAN = BIT_ULL(12),
+ VIRTCHNL2_CAP_PTP = BIT_ULL(13),
+ /* EDT: Earliest Departure Time capability used for Timing Wheel */
+ VIRTCHNL2_CAP_EDT = BIT_ULL(14),
+ VIRTCHNL2_CAP_ADV_RSS = BIT_ULL(15),
+ VIRTCHNL2_CAP_FDIR = BIT_ULL(16),
+ VIRTCHNL2_CAP_RX_FLEX_DESC = BIT_ULL(17),
+ VIRTCHNL2_CAP_PTYPE = BIT_ULL(18),
+ VIRTCHNL2_CAP_LOOPBACK = BIT_ULL(19),
+ /* Other capability 20 is reserved */
+
+ /* this must be the last capability */
+ VIRTCHNL2_CAP_OEM = BIT_ULL(63),
+};
+
+/* underlying device type */
+enum virtchl2_device_type {
+ VIRTCHNL2_MEV_DEVICE = 0,
+};
+
+/**
+ * enum virtchnl2_txq_sched_mode - Transmit Queue Scheduling Modes.
+ * @VIRTCHNL2_TXQ_SCHED_MODE_QUEUE: Queue mode is the legacy mode i.e. in-order
+ * completions where descriptors and buffers
+ * are completed at the same time.
+ * @VIRTCHNL2_TXQ_SCHED_MODE_FLOW: Flow scheduling mode allows for out of order
+ * packet processing where descriptors are
+ * cleaned in order, but buffers can be
+ * completed out of order.
+ */
+enum virtchnl2_txq_sched_mode {
+ VIRTCHNL2_TXQ_SCHED_MODE_QUEUE = 0,
+ VIRTCHNL2_TXQ_SCHED_MODE_FLOW = 1,
+};
+
+/**
+ * enum virtchnl2_rxq_flags - Receive Queue Feature flags.
+ * @VIRTCHNL2_RXQ_RSC: Rx queue RSC flag.
+ * @VIRTCHNL2_RXQ_HDR_SPLIT: Rx queue header split flag.
+ * @VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK: When set, packet descriptors are flushed
+ * by hardware immediately after processing
+ * each packet.
+ * @VIRTCHNL2_RX_DESC_SIZE_16BYTE: Rx queue 16 byte descriptor size.
+ * @VIRTCHNL2_RX_DESC_SIZE_32BYTE: Rx queue 32 byte descriptor size.
+ */
+enum virtchnl2_rxq_flags {
+ VIRTCHNL2_RXQ_RSC = BIT(0),
+ VIRTCHNL2_RXQ_HDR_SPLIT = BIT(1),
+ VIRTCHNL2_RXQ_IMMEDIATE_WRITE_BACK = BIT(2),
+ VIRTCHNL2_RX_DESC_SIZE_16BYTE = BIT(3),
+ VIRTCHNL2_RX_DESC_SIZE_32BYTE = BIT(4),
+};
+
+/* Type of RSS algorithm */
+enum virtchnl2_rss_alg {
+ VIRTCHNL2_RSS_ALG_TOEPLITZ_ASYMMETRIC = 0,
+ VIRTCHNL2_RSS_ALG_R_ASYMMETRIC = 1,
+ VIRTCHNL2_RSS_ALG_TOEPLITZ_SYMMETRIC = 2,
+ VIRTCHNL2_RSS_ALG_XOR_SYMMETRIC = 3,
+};
+
+/* Type of event */
+enum virtchnl2_event_codes {
+ VIRTCHNL2_EVENT_UNKNOWN = 0,
+ VIRTCHNL2_EVENT_LINK_CHANGE = 1,
+ /* Event type 2, 3 are reserved */
+};
+
+/* Transmit and Receive queue types are valid in legacy as well as split queue
+ * models. With Split Queue model, 2 additional types are introduced -
+ * TX_COMPLETION and RX_BUFFER. In the split queue model, receive corresponds
+ * to the queue where hardware posts completions.
+ */
+enum virtchnl2_queue_type {
+ VIRTCHNL2_QUEUE_TYPE_TX = 0,
+ VIRTCHNL2_QUEUE_TYPE_RX = 1,
+ VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION = 2,
+ VIRTCHNL2_QUEUE_TYPE_RX_BUFFER = 3,
+ VIRTCHNL2_QUEUE_TYPE_CONFIG_TX = 4,
+ VIRTCHNL2_QUEUE_TYPE_CONFIG_RX = 5,
+ /* Queue types 6, 7, 8, 9 are reserved */
+ VIRTCHNL2_QUEUE_TYPE_MBX_TX = 10,
+ VIRTCHNL2_QUEUE_TYPE_MBX_RX = 11,
+};
+
+/* Interrupt throttling rate index */
+enum virtchnl2_itr_idx {
+ VIRTCHNL2_ITR_IDX_0 = 0,
+ VIRTCHNL2_ITR_IDX_1 = 1,
+};
+
+/**
+ * enum virtchnl2_mac_addr_type - MAC address types.
+ * @VIRTCHNL2_MAC_ADDR_PRIMARY: PF/VF driver should set this type for the
+ * primary/device unicast MAC address filter for
+ * VIRTCHNL2_OP_ADD_MAC_ADDR and
+ * VIRTCHNL2_OP_DEL_MAC_ADDR. This allows for the
+ * underlying control plane function to accurately
+ * track the MAC address and for VM/function reset.
+ *
+ * @VIRTCHNL2_MAC_ADDR_EXTRA: PF/VF driver should set this type for any extra
+ * unicast and/or multicast filters that are being
+ * added/deleted via VIRTCHNL2_OP_ADD_MAC_ADDR or
+ * VIRTCHNL2_OP_DEL_MAC_ADDR.
+ */
+enum virtchnl2_mac_addr_type {
+ VIRTCHNL2_MAC_ADDR_PRIMARY = 1,
+ VIRTCHNL2_MAC_ADDR_EXTRA = 2,
+};
+
+/* Flags used for promiscuous mode */
+enum virtchnl2_promisc_flags {
+ VIRTCHNL2_UNICAST_PROMISC = BIT(0),
+ VIRTCHNL2_MULTICAST_PROMISC = BIT(1),
+};
+
+/* Protocol header type within a packet segment. A segment consists of one or
+ * more protocol headers that make up a logical group of protocol headers. Each
+ * logical group of protocol headers encapsulates or is encapsulated using/by
+ * tunneling or encapsulation protocols for network virtualization.
+ */
+enum virtchnl2_proto_hdr_type {
+ /* VIRTCHNL2_PROTO_HDR_ANY is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ANY = 0,
+ VIRTCHNL2_PROTO_HDR_PRE_MAC = 1,
+ /* VIRTCHNL2_PROTO_HDR_MAC is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_MAC = 2,
+ VIRTCHNL2_PROTO_HDR_POST_MAC = 3,
+ VIRTCHNL2_PROTO_HDR_ETHERTYPE = 4,
+ VIRTCHNL2_PROTO_HDR_VLAN = 5,
+ VIRTCHNL2_PROTO_HDR_SVLAN = 6,
+ VIRTCHNL2_PROTO_HDR_CVLAN = 7,
+ VIRTCHNL2_PROTO_HDR_MPLS = 8,
+ VIRTCHNL2_PROTO_HDR_UMPLS = 9,
+ VIRTCHNL2_PROTO_HDR_MMPLS = 10,
+ VIRTCHNL2_PROTO_HDR_PTP = 11,
+ VIRTCHNL2_PROTO_HDR_CTRL = 12,
+ VIRTCHNL2_PROTO_HDR_LLDP = 13,
+ VIRTCHNL2_PROTO_HDR_ARP = 14,
+ VIRTCHNL2_PROTO_HDR_ECP = 15,
+ VIRTCHNL2_PROTO_HDR_EAPOL = 16,
+ VIRTCHNL2_PROTO_HDR_PPPOD = 17,
+ VIRTCHNL2_PROTO_HDR_PPPOE = 18,
+ /* VIRTCHNL2_PROTO_HDR_IPV4 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV4 = 19,
+ /* IPv4 and IPv6 Fragment header types are only associated to
+ * VIRTCHNL2_PROTO_HDR_IPV4 and VIRTCHNL2_PROTO_HDR_IPV6 respectively,
+ * cannot be used independently.
+ */
+ /* VIRTCHNL2_PROTO_HDR_IPV4_FRAG is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV4_FRAG = 20,
+ /* VIRTCHNL2_PROTO_HDR_IPV6 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV6 = 21,
+ /* VIRTCHNL2_PROTO_HDR_IPV6_FRAG is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_IPV6_FRAG = 22,
+ VIRTCHNL2_PROTO_HDR_IPV6_EH = 23,
+ /* VIRTCHNL2_PROTO_HDR_UDP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_UDP = 24,
+ /* VIRTCHNL2_PROTO_HDR_TCP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_TCP = 25,
+ /* VIRTCHNL2_PROTO_HDR_SCTP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_SCTP = 26,
+ /* VIRTCHNL2_PROTO_HDR_ICMP is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ICMP = 27,
+ /* VIRTCHNL2_PROTO_HDR_ICMPV6 is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_ICMPV6 = 28,
+ VIRTCHNL2_PROTO_HDR_IGMP = 29,
+ VIRTCHNL2_PROTO_HDR_AH = 30,
+ VIRTCHNL2_PROTO_HDR_ESP = 31,
+ VIRTCHNL2_PROTO_HDR_IKE = 32,
+ VIRTCHNL2_PROTO_HDR_NATT_KEEP = 33,
+ /* VIRTCHNL2_PROTO_HDR_PAY is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_PAY = 34,
+ VIRTCHNL2_PROTO_HDR_L2TPV2 = 35,
+ VIRTCHNL2_PROTO_HDR_L2TPV2_CONTROL = 36,
+ VIRTCHNL2_PROTO_HDR_L2TPV3 = 37,
+ VIRTCHNL2_PROTO_HDR_GTP = 38,
+ VIRTCHNL2_PROTO_HDR_GTP_EH = 39,
+ VIRTCHNL2_PROTO_HDR_GTPCV2 = 40,
+ VIRTCHNL2_PROTO_HDR_GTPC_TEID = 41,
+ VIRTCHNL2_PROTO_HDR_GTPU = 42,
+ VIRTCHNL2_PROTO_HDR_GTPU_UL = 43,
+ VIRTCHNL2_PROTO_HDR_GTPU_DL = 44,
+ VIRTCHNL2_PROTO_HDR_ECPRI = 45,
+ VIRTCHNL2_PROTO_HDR_VRRP = 46,
+ VIRTCHNL2_PROTO_HDR_OSPF = 47,
+ /* VIRTCHNL2_PROTO_HDR_TUN is a mandatory protocol id */
+ VIRTCHNL2_PROTO_HDR_TUN = 48,
+ VIRTCHNL2_PROTO_HDR_GRE = 49,
+ VIRTCHNL2_PROTO_HDR_NVGRE = 50,
+ VIRTCHNL2_PROTO_HDR_VXLAN = 51,
+ VIRTCHNL2_PROTO_HDR_VXLAN_GPE = 52,
+ VIRTCHNL2_PROTO_HDR_GENEVE = 53,
+ VIRTCHNL2_PROTO_HDR_NSH = 54,
+ VIRTCHNL2_PROTO_HDR_QUIC = 55,
+ VIRTCHNL2_PROTO_HDR_PFCP = 56,
+ VIRTCHNL2_PROTO_HDR_PFCP_NODE = 57,
+ VIRTCHNL2_PROTO_HDR_PFCP_SESSION = 58,
+ VIRTCHNL2_PROTO_HDR_RTP = 59,
+ VIRTCHNL2_PROTO_HDR_ROCE = 60,
+ VIRTCHNL2_PROTO_HDR_ROCEV1 = 61,
+ VIRTCHNL2_PROTO_HDR_ROCEV2 = 62,
+ /* Protocol ids up to 32767 are reserved.
+ * 32768 - 65534 are used for user defined protocol ids.
+ * VIRTCHNL2_PROTO_HDR_NO_PROTO is a mandatory protocol id.
+ */
+ VIRTCHNL2_PROTO_HDR_NO_PROTO = 65535,
+};
+
+enum virtchl2_version {
+ VIRTCHNL2_VERSION_MINOR_0 = 0,
+ VIRTCHNL2_VERSION_MAJOR_2 = 2,
+};
+
+/**
+ * struct virtchnl2_edt_caps - Get EDT granularity and time horizon.
+ * @tstamp_granularity_ns: Timestamp granularity in nanoseconds.
+ * @time_horizon_ns: Total time window in nanoseconds.
+ *
+ * Associated with VIRTCHNL2_OP_GET_EDT_CAPS.
+ */
+struct virtchnl2_edt_caps {
+ __le64 tstamp_granularity_ns;
+ __le64 time_horizon_ns;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_edt_caps);
+
+/**
+ * struct virtchnl2_version_info - Version information.
+ * @major: Major version.
+ * @minor: Minor version.
+ *
+ * PF/VF posts its version number to the CP. CP responds with its version number
+ * in the same format, along with a return code.
+ * If there is a major version mismatch, then the PF/VF cannot operate.
+ * If there is a minor version mismatch, then the PF/VF can operate but should
+ * add a warning to the system log.
+ *
+ * This version opcode MUST always be specified as == 1, regardless of other
+ * changes in the API. The CP must always respond to this message without
+ * error regardless of version mismatch.
+ *
+ * Associated with VIRTCHNL2_OP_VERSION.
+ */
+struct virtchnl2_version_info {
+ __le32 major;
+ __le32 minor;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_version_info);
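+
+/* Illustrative request sketch (not part of this interface):
+ *
+ *	struct virtchnl2_version_info ver = {
+ *		.major = cpu_to_le32(VIRTCHNL2_VERSION_MAJOR_2),
+ *		.minor = cpu_to_le32(VIRTCHNL2_VERSION_MINOR_0),
+ *	};
+ *
+ * is sent with VIRTCHNL2_OP_VERSION; the CP answers with its own version in
+ * the same layout so both sides can detect a major/minor mismatch.
+ */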
+
+/**
+ * struct virtchnl2_get_capabilities - Capabilities info.
+ * @csum_caps: See enum virtchnl2_cap_txrx_csum.
+ * @seg_caps: See enum virtchnl2_cap_seg.
+ * @hsplit_caps: See enum virtchnl2_cap_rx_hsplit_at.
+ * @rsc_caps: See enum virtchnl2_cap_rsc.
+ * @rss_caps: See enum virtchnl2_cap_rss.
+ * @other_caps: See enum virtchnl2_cap_other.
+ * @mailbox_dyn_ctl: DYN_CTL register offset and vector id for mailbox
+ * provided by CP.
+ * @mailbox_vector_id: Mailbox vector id.
+ * @num_allocated_vectors: Maximum number of allocated vectors for the device.
+ * @max_rx_q: Maximum number of supported Rx queues.
+ * @max_tx_q: Maximum number of supported Tx queues.
+ * @max_rx_bufq: Maximum number of supported buffer queues.
+ * @max_tx_complq: Maximum number of supported completion queues.
+ * @max_sriov_vfs: The PF sends the maximum VFs it is requesting. The CP
+ * responds with the maximum VFs granted.
+ * @max_vports: Maximum number of vports that can be supported.
+ * @default_num_vports: Default number of vports driver should allocate on load.
+ * @max_tx_hdr_size: Max header length hardware can parse/checksum, in bytes.
+ * @max_sg_bufs_per_tx_pkt: Max number of scatter gather buffers that can be
+ * sent per transmit packet without needing to be
+ * linearized.
+ * @pad: Padding.
+ * @reserved: Reserved.
+ * @device_type: See enum virtchl2_device_type.
+ * @min_sso_packet_len: Min packet length supported by device for single
+ * segment offload.
+ * @max_hdr_buf_per_lso: Max number of header buffers that can be used for
+ * an LSO.
+ * @pad1: Padding for future extensions.
+ *
+ * Dataplane driver sends this message to CP to negotiate capabilities and
+ * provides a virtchnl2_get_capabilities structure with its desired
+ * capabilities, max_sriov_vfs and num_allocated_vectors.
+ * CP responds with a virtchnl2_get_capabilities structure updated
+ * with allowed capabilities and the other fields as below.
+ * If PF sets max_sriov_vfs as 0, CP will respond with max number of VFs
+ * that can be created by this PF. For any other value 'n', CP responds
+ * with max_sriov_vfs set to min(n, x) where x is the max number of VFs
+ * allowed by CP's policy. max_sriov_vfs is not applicable for VFs.
+ * If dataplane driver sets num_allocated_vectors as 0, CP will respond with 1
+ * which is default vector associated with the default mailbox. For any other
+ * value 'n', CP responds with a value <= n based on the CP's policy of
+ * max number of vectors for a PF.
+ * CP will respond with the vector ID of mailbox allocated to the PF in
+ * mailbox_vector_id and the number of itr index registers in itr_idx_map.
+ * It also responds with default number of vports that the dataplane driver
+ * should come up with in default_num_vports and maximum number of vports that
+ * can be supported in max_vports.
+ *
+ * Associated with VIRTCHNL2_OP_GET_CAPS.
+ */
+struct virtchnl2_get_capabilities {
+ __le32 csum_caps;
+ __le32 seg_caps;
+ __le32 hsplit_caps;
+ __le32 rsc_caps;
+ __le64 rss_caps;
+ __le64 other_caps;
+ __le32 mailbox_dyn_ctl;
+ __le16 mailbox_vector_id;
+ __le16 num_allocated_vectors;
+ __le16 max_rx_q;
+ __le16 max_tx_q;
+ __le16 max_rx_bufq;
+ __le16 max_tx_complq;
+ __le16 max_sriov_vfs;
+ __le16 max_vports;
+ __le16 default_num_vports;
+ __le16 max_tx_hdr_size;
+ u8 max_sg_bufs_per_tx_pkt;
+ u8 pad[3];
+ u8 reserved[4];
+ __le32 device_type;
+ u8 min_sso_packet_len;
+ u8 max_hdr_buf_per_lso;
+ u8 pad1[10];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(80, virtchnl2_get_capabilities);
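+
+/* Illustrative negotiation sketch (not part of this interface): a driver
+ * could request a subset of offloads and let the CP trim it, e.g.:
+ *
+ *	struct virtchnl2_get_capabilities caps = {};
+ *
+ *	caps.csum_caps = cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
+ *				     VIRTCHNL2_CAP_RX_CSUM_L3_IPV4);
+ *	caps.rss_caps = cpu_to_le64(VIRTCHNL2_CAP_RSS_IPV4_TCP |
+ *				    VIRTCHNL2_CAP_RSS_IPV6_TCP);
+ *	caps.max_sriov_vfs = 0;
+ *	caps.num_allocated_vectors = 0;
+ *
+ * The CP replies with the granted capability bits and fills the remaining
+ * fields (max_vports, default_num_vports, mailbox_vector_id and so on).
+ */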
+
+/**
+ * struct virtchnl2_queue_reg_chunk - Single queue chunk.
+ * @type: See enum virtchnl2_queue_type.
+ * @start_queue_id: Start Queue ID.
+ * @num_queues: Number of queues in the chunk.
+ * @pad: Padding.
+ * @qtail_reg_start: Queue tail register offset.
+ * @qtail_reg_spacing: Queue tail register spacing.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_queue_reg_chunk {
+ __le32 type;
+ __le32 start_queue_id;
+ __le32 num_queues;
+ __le32 pad;
+ __le64 qtail_reg_start;
+ __le32 qtail_reg_spacing;
+ u8 pad1[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
+
+/**
+ * struct virtchnl2_queue_reg_chunks - Specify several chunks of contiguous
+ * queues.
+ * @num_chunks: Number of chunks.
+ * @pad: Padding.
+ * @chunks: Chunks of queue info.
+ */
+struct virtchnl2_queue_reg_chunks {
+ __le16 num_chunks;
+ u8 pad[6];
+ struct virtchnl2_queue_reg_chunk chunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
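+
+/* Illustrative sketch (not part of this interface): counting the queues of
+ * one type across all chunks.
+ *
+ *	static u32 example_count_queues(const struct virtchnl2_queue_reg_chunks *c,
+ *					u32 type)
+ *	{
+ *		u32 num = 0;
+ *		u16 i;
+ *
+ *		for (i = 0; i < le16_to_cpu(c->num_chunks); i++)
+ *			if (le32_to_cpu(c->chunks[i].type) == type)
+ *				num += le32_to_cpu(c->chunks[i].num_queues);
+ *
+ *		return num;
+ *	}
+ */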
+
+/**
+ * struct virtchnl2_create_vport - Create vport config info.
+ * @vport_type: See enum virtchnl2_vport_type.
+ * @txq_model: See virtchnl2_queue_model.
+ * @rxq_model: See virtchnl2_queue_model.
+ * @num_tx_q: Number of Tx queues.
+ * @num_tx_complq: Valid only if txq_model is split queue.
+ * @num_rx_q: Number of Rx queues.
+ * @num_rx_bufq: Valid only if rxq_model is split queue.
+ * @default_rx_q: Relative receive queue index to be used as default.
+ * @vport_index: Used to align PF and CP in case of default multiple vports;
+ * it is filled by the PF and the CP returns the same value, to
+ * enable the driver to support multiple asynchronous parallel
+ * CREATE_VPORT requests and associate a response with a specific
+ * request.
+ * @max_mtu: Max MTU. CP populates this field on response.
+ * @vport_id: Vport id. CP populates this field on response.
+ * @default_mac_addr: Default MAC address.
+ * @pad: Padding.
+ * @rx_desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
+ * @tx_desc_ids: See VIRTCHNL2_TX_DESC_IDS definitions.
+ * @pad1: Padding.
+ * @rss_algorithm: RSS algorithm.
+ * @rss_key_size: RSS key size.
+ * @rss_lut_size: RSS LUT size.
+ * @rx_split_pos: See enum virtchnl2_cap_rx_hsplit_at.
+ * @pad2: Padding.
+ * @chunks: Chunks of contiguous queues.
+ *
+ * PF sends this message to CP to create a vport by filling in required
+ * fields of virtchnl2_create_vport structure.
+ * CP responds with the updated virtchnl2_create_vport structure containing the
+ * necessary fields followed by chunks which in turn will have an array of
+ * num_chunks entries of virtchnl2_queue_reg_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_CREATE_VPORT.
+ */
+struct virtchnl2_create_vport {
+ __le16 vport_type;
+ __le16 txq_model;
+ __le16 rxq_model;
+ __le16 num_tx_q;
+ __le16 num_tx_complq;
+ __le16 num_rx_q;
+ __le16 num_rx_bufq;
+ __le16 default_rx_q;
+ __le16 vport_index;
+ /* CP populates the following fields on response */
+ __le16 max_mtu;
+ __le32 vport_id;
+ u8 default_mac_addr[ETH_ALEN];
+ __le16 pad;
+ __le64 rx_desc_ids;
+ __le64 tx_desc_ids;
+ u8 pad1[72];
+ __le32 rss_algorithm;
+ __le16 rss_key_size;
+ __le16 rss_lut_size;
+ __le32 rx_split_pos;
+ u8 pad2[20];
+ struct virtchnl2_queue_reg_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(160, virtchnl2_create_vport);
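+
+/* Illustrative request sketch (not part of this interface): a split queue
+ * model vport with four Rx/Tx queue pairs might be requested as
+ *
+ *	struct virtchnl2_create_vport req = {};
+ *
+ *	req.vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
+ *	req.txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ *	req.rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
+ *	req.num_tx_q = cpu_to_le16(4);
+ *	req.num_tx_complq = cpu_to_le16(4);
+ *	req.num_rx_q = cpu_to_le16(4);
+ *	req.num_rx_bufq = cpu_to_le16(8);
+ *
+ * and the CP fills max_mtu, vport_id, default_mac_addr and the queue register
+ * chunks in its response.
+ */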
+
+/**
+ * struct virtchnl2_vport - Vport ID info.
+ * @vport_id: Vport id.
+ * @pad: Padding for future extensions.
+ *
+ * PF sends this message to CP to destroy, enable or disable a vport by filling
+ * in the vport_id in virtchnl2_vport structure.
+ * CP responds with the status of the requested operation.
+ *
+ * Associated with VIRTCHNL2_OP_DESTROY_VPORT, VIRTCHNL2_OP_ENABLE_VPORT,
+ * VIRTCHNL2_OP_DISABLE_VPORT.
+ */
+struct virtchnl2_vport {
+ __le32 vport_id;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_vport);
+
+/**
+ * struct virtchnl2_txq_info - Transmit queue config info
+ * @dma_ring_addr: DMA address.
+ * @type: See enum virtchnl2_queue_type.
+ * @queue_id: Queue ID.
+ * @relative_queue_id: Valid only if queue model is split and type is transmit
+ * queue. Used in many to one mapping of transmit queues to
+ * completion queue.
+ * @model: See enum virtchnl2_queue_model.
+ * @sched_mode: See enum virtchnl2_txq_sched_mode.
+ * @qflags: TX queue feature flags.
+ * @ring_len: Ring length.
+ * @tx_compl_queue_id: Valid only if queue model is split and type is transmit
+ * queue.
+ * @peer_type: Valid only if queue type is VIRTCHNL2_QUEUE_TYPE_MBX_TX.
+ * @peer_rx_queue_id: Valid only if queue type is CONFIG_TX and used to deliver
+ * messages for the respective CONFIG_TX queue.
+ * @pad: Padding.
+ * @egress_pasid: Egress PASID info.
+ * @egress_hdr_pasid: Egress header PASID.
+ * @egress_buf_pasid: Egress buffer PASID.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_txq_info {
+ __le64 dma_ring_addr;
+ __le32 type;
+ __le32 queue_id;
+ __le16 relative_queue_id;
+ __le16 model;
+ __le16 sched_mode;
+ __le16 qflags;
+ __le16 ring_len;
+ __le16 tx_compl_queue_id;
+ __le16 peer_type;
+ __le16 peer_rx_queue_id;
+ u8 pad[4];
+ __le32 egress_pasid;
+ __le32 egress_hdr_pasid;
+ __le32 egress_buf_pasid;
+ u8 pad1[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(56, virtchnl2_txq_info);
+
+/**
+ * struct virtchnl2_config_tx_queues - TX queue config.
+ * @vport_id: Vport id.
+ * @num_qinfo: Number of virtchnl2_txq_info structs.
+ * @pad: Padding.
+ * @qinfo: Tx queues config info.
+ *
+ * PF sends this message to set up parameters for one or more transmit queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_txq_info
+ * structures. CP configures requested queues and returns a status code. If
+ * num_qinfo specified is greater than the number of queues associated with the
+ * vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_TX_QUEUES.
+ */
+struct virtchnl2_config_tx_queues {
+ __le32 vport_id;
+ __le16 num_qinfo;
+ u8 pad[10];
+ struct virtchnl2_txq_info qinfo[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
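+
+/* Illustrative sizing sketch (not part of this interface): the mailbox buffer
+ * for this message is naturally sized with struct_size(), e.g.
+ *
+ *	buf_size = struct_size(ctq, qinfo, num_qinfo);
+ *
+ * where 'ctq' is a struct virtchnl2_config_tx_queues pointer and num_qinfo
+ * is the number of queues being configured.
+ */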
+
+/**
+ * struct virtchnl2_rxq_info - Receive queue config info.
+ * @desc_ids: See VIRTCHNL2_RX_DESC_IDS definitions.
+ * @dma_ring_addr: DMA address.
+ * @type: See enum virtchnl2_queue_type.
+ * @queue_id: Queue id.
+ * @model: See enum virtchnl2_queue_model.
+ * @hdr_buffer_size: Header buffer size.
+ * @data_buffer_size: Data buffer size.
+ * @max_pkt_size: Max packet size.
+ * @ring_len: Ring length.
+ * @buffer_notif_stride: Buffer notification stride in units of 32-descriptors.
+ * This field must be a power of 2.
+ * @pad: Padding.
+ * @dma_head_wb_addr: Applicable only for receive buffer queues.
+ * @qflags: Applicable only for receive completion queues.
+ * See enum virtchnl2_rxq_flags.
+ * @rx_buffer_low_watermark: Rx buffer low watermark.
+ * @rx_bufq1_id: Buffer queue index of the first buffer queue associated with
+ * the Rx queue. Valid only in split queue model.
+ * @rx_bufq2_id: Buffer queue index of the second buffer queue associated with
+ * the Rx queue. Valid only in split queue model.
+ * @bufq2_ena: Indicates whether a second buffer queue is used; rx_bufq2_id is
+ * valid only if this field is set.
+ * @pad1: Padding.
+ * @ingress_pasid: Ingress PASID.
+ * @ingress_hdr_pasid: Ingress header PASID.
+ * @ingress_buf_pasid: Ingress buffer PASID.
+ * @pad2: Padding for future extensions.
+ */
+struct virtchnl2_rxq_info {
+ __le64 desc_ids;
+ __le64 dma_ring_addr;
+ __le32 type;
+ __le32 queue_id;
+ __le16 model;
+ __le16 hdr_buffer_size;
+ __le32 data_buffer_size;
+ __le32 max_pkt_size;
+ __le16 ring_len;
+ u8 buffer_notif_stride;
+ u8 pad;
+ __le64 dma_head_wb_addr;
+ __le16 qflags;
+ __le16 rx_buffer_low_watermark;
+ __le16 rx_bufq1_id;
+ __le16 rx_bufq2_id;
+ u8 bufq2_ena;
+ u8 pad1[3];
+ __le32 ingress_pasid;
+ __le32 ingress_hdr_pasid;
+ __le32 ingress_buf_pasid;
+ u8 pad2[16];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(88, virtchnl2_rxq_info);
+
+/**
+ * struct virtchnl2_config_rx_queues - Rx queues config.
+ * @vport_id: Vport id.
+ * @num_qinfo: Number of instances.
+ * @pad: Padding.
+ * @qinfo: Rx queues config info.
+ *
+ * PF sends this message to set up parameters for one or more receive queues.
+ * This message contains an array of num_qinfo instances of virtchnl2_rxq_info
+ * structures. CP configures requested queues and returns a status code.
+ * If the number of queues specified is greater than the number of queues
+ * associated with the vport, an error is returned and no queues are configured.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_RX_QUEUES.
+ */
+struct virtchnl2_config_rx_queues {
+ __le32 vport_id;
+ __le16 num_qinfo;
+ u8 pad[18];
+ struct virtchnl2_rxq_info qinfo[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
+
+/**
+ * struct virtchnl2_add_queues - data for VIRTCHNL2_OP_ADD_QUEUES.
+ * @vport_id: Vport id.
+ * @num_tx_q: Number of Tx queues.
+ * @num_tx_complq: Number of Tx completion queues.
+ * @num_rx_q: Number of Rx queues.
+ * @num_rx_bufq: Number of Rx buffer queues.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues.
+ *
+ * PF sends this message to request additional transmit/receive queues beyond
+ * the ones that were assigned via CREATE_VPORT request. virtchnl2_add_queues
+ * structure is used to specify the number of each type of queues.
+ * CP responds with the same structure with the actual number of queues assigned
+ * followed by num_chunks of virtchnl2_queue_reg_chunk structures.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_QUEUES.
+ */
+struct virtchnl2_add_queues {
+ __le32 vport_id;
+ __le16 num_tx_q;
+ __le16 num_tx_complq;
+ __le16 num_rx_q;
+ __le16 num_rx_bufq;
+ u8 pad[4];
+ struct virtchnl2_queue_reg_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_add_queues);
+
+/**
+ * struct virtchnl2_vector_chunk - Structure to specify a chunk of contiguous
+ * interrupt vectors.
+ * @start_vector_id: Start vector id.
+ * @start_evv_id: Start EVV id.
+ * @num_vectors: Number of vectors.
+ * @pad: Padding.
+ * @dynctl_reg_start: DYN_CTL register offset.
+ * @dynctl_reg_spacing: register spacing between DYN_CTL registers of 2
+ * consecutive vectors.
+ * @itrn_reg_start: ITRN register offset.
+ * @itrn_reg_spacing: Register spacing between itrn registers of 2
+ * consecutive vectors.
+ * @itrn_index_spacing: Register spacing between itrn registers of the same
+ * vector where n=0..2.
+ * @pad1: Padding for future extensions.
+ *
+ * Register offsets and spacing provided by CP.
+ * Dynamic control registers are used for enabling/disabling/re-enabling
+ * interrupts and updating interrupt rates in the hotpath. Any changes
+ * to interrupt rates in the dynamic control registers will be reflected
+ * in the interrupt throttling rate registers.
+ * itrn registers are used to update interrupt rates for specific
+ * interrupt indices without modifying the state of the interrupt.
+ */
+struct virtchnl2_vector_chunk {
+ __le16 start_vector_id;
+ __le16 start_evv_id;
+ __le16 num_vectors;
+ __le16 pad;
+ __le32 dynctl_reg_start;
+ __le32 dynctl_reg_spacing;
+ __le32 itrn_reg_start;
+ __le32 itrn_reg_spacing;
+ __le32 itrn_index_spacing;
+ u8 pad1[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
+
+/**
+ * struct virtchnl2_vector_chunks - chunks of contiguous interrupt vectors.
+ * @num_vchunks: number of vector chunks.
+ * @pad: Padding.
+ * @vchunks: Chunks of contiguous vector info.
+ *
+ * PF sends virtchnl2_vector_chunks struct to specify the vectors it is giving
+ * away. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_DEALLOC_VECTORS.
+ */
+struct virtchnl2_vector_chunks {
+ __le16 num_vchunks;
+ u8 pad[14];
+ struct virtchnl2_vector_chunk vchunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
+
+/**
+ * struct virtchnl2_alloc_vectors - vector allocation info.
+ * @num_vectors: Number of vectors.
+ * @pad: Padding.
+ * @vchunks: Chunks of contiguous vector info.
+ *
+ * PF sends this message to request additional interrupt vectors beyond the
+ * ones that were assigned via GET_CAPS request. virtchnl2_alloc_vectors
+ * structure is used to specify the number of vectors requested. CP responds
+ * with the same structure with the actual number of vectors assigned followed
+ * by virtchnl2_vector_chunks structure identifying the vector ids.
+ *
+ * Associated with VIRTCHNL2_OP_ALLOC_VECTORS.
+ */
+struct virtchnl2_alloc_vectors {
+ __le16 num_vectors;
+ u8 pad[14];
+ struct virtchnl2_vector_chunks vchunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_alloc_vectors);
+
+/**
+ * struct virtchnl2_rss_lut - RSS LUT info.
+ * @vport_id: Vport id.
+ * @lut_entries_start: Start of LUT entries.
+ * @lut_entries: Number of LUT entries.
+ * @pad: Padding.
+ * @lut: RSS lookup table.
+ *
+ * PF sends this message to get or set RSS lookup table. Only supported if
+ * both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_LUT and VIRTCHNL2_OP_SET_RSS_LUT.
+ */
+struct virtchnl2_rss_lut {
+ __le32 vport_id;
+ __le16 lut_entries_start;
+ __le16 lut_entries;
+ u8 pad[4];
+ __le32 lut[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
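+
+/* Illustrative request sketch (not part of this interface): programming 64
+ * LUT entries starting at index 128 would set lut_entries_start = 128 and
+ * lut_entries = 64, with the mailbox buffer sized as struct_size(rl, lut, 64),
+ * where 'rl' is a struct virtchnl2_rss_lut pointer.
+ */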
+
+/**
+ * struct virtchnl2_rss_hash - RSS hash info.
+ * @ptype_groups: Packet type groups bitmap.
+ * @vport_id: Vport id.
+ * @pad: Padding for future extensions.
+ *
+ * PF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the CP sets these to all possible traffic types that the
+ * hardware supports. The PF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Only supported if both PF and CP drivers set the VIRTCHNL2_CAP_RSS bit
+ * during configuration negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_HASH and VIRTCHNL2_OP_SET_RSS_HASH
+ */
+struct virtchnl2_rss_hash {
+ __le64 ptype_groups;
+ __le32 vport_id;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_rss_hash);
+
+/**
+ * struct virtchnl2_sriov_vfs_info - VFs info.
+ * @num_vfs: Number of VFs.
+ * @pad: Padding for future extensions.
+ *
+ * This message is used to set the number of SR-IOV VFs to be created. The
+ * actual allocation of resources for the VFs in terms of vport, queues and
+ * interrupts is done by CP. When this call completes, the IDPF driver calls
+ * pci_enable_sriov to let the OS instantiate the SR-IOV PCIe devices.
+ * Setting the number of VFs to 0 will destroy all the VFs of this function.
+ *
+ * Associated with VIRTCHNL2_OP_SET_SRIOV_VFS.
+ */
+struct virtchnl2_sriov_vfs_info {
+ __le16 num_vfs;
+ __le16 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(4, virtchnl2_sriov_vfs_info);
+
+/**
+ * struct virtchnl2_ptype - Packet type info.
+ * @ptype_id_10: 10-bit packet type.
+ * @ptype_id_8: 8-bit packet type.
+ * @proto_id_count: Number of protocol ids the packet supports; a maximum of 32
+ * protocol ids is supported.
+ * @pad: Padding.
+ * @proto_id: proto_id_count decides the allocation of protocol id array.
+ * See enum virtchnl2_proto_hdr_type.
+ *
+ * Based on the descriptor type the PF supports, CP fills ptype_id_10 or
+ * ptype_id_8 for flex and base descriptor respectively. If ptype_id_10 value
+ * is set to 0xFFFF, PF should consider this ptype as dummy one and it is the
+ * last ptype.
+ */
+struct virtchnl2_ptype {
+ __le16 ptype_id_10;
+ u8 ptype_id_8;
+ u8 proto_id_count;
+ __le16 pad;
+ __le16 proto_id[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
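+
+/* Illustrative sizing sketch (not part of this interface): since proto_id is
+ * a flexible array, the size of one ptype entry in the response buffer is
+ *
+ *	struct_size(ptype, proto_id, ptype->proto_id_count)
+ *
+ * where 'ptype' is a struct virtchnl2_ptype pointer.
+ */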
+
+/**
+ * struct virtchnl2_get_ptype_info - Packet type info.
+ * @start_ptype_id: Starting ptype ID.
+ * @num_ptypes: Number of packet types from start_ptype_id.
+ * @pad: Padding for future extensions.
+ *
+ * The total number of supported packet types is based on the descriptor type.
+ * For the flex descriptor, it is 1024 (10-bit ptype), and for the base
+ * descriptor, it is 256 (8-bit ptype). Send this message to the CP by
+ * populating the 'start_ptype_id' and the 'num_ptypes'. CP responds with the
+ * 'start_ptype_id', 'num_ptypes', and the array of ptype (virtchnl2_ptype) that
+ * are added at the end of the 'virtchnl2_get_ptype_info' message (Note: there
+ * is no dedicated field for the ptypes; they are appended at the end of the
+ * ptype info message and PF/VF is expected to extract them accordingly.
+ * This is done because the compiler does not allow nested flexible
+ * array fields).
+ *
+ * If all the ptypes don't fit into one mailbox buffer, CP splits the
+ * ptype info into multiple messages, where each message will have its own
+ * 'start_ptype_id', 'num_ptypes', and the ptype array itself. When CP is done
+ * updating all the ptype information extracted from the package (the number of
+ * ptypes extracted might be less than what PF/VF expects), it will append a
+ * dummy ptype (which has 'ptype_id_10' of 'struct virtchnl2_ptype' as 0xFFFF)
+ * to the ptype array.
+ *
+ * PF/VF is expected to receive multiple VIRTCHNL2_OP_GET_PTYPE_INFO messages.
+ *
+ * Associated with VIRTCHNL2_OP_GET_PTYPE_INFO.
+ */
+struct virtchnl2_get_ptype_info {
+ __le16 start_ptype_id;
+ __le16 num_ptypes;
+ __le32 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_get_ptype_info);
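+
+/* Illustrative parse sketch (not part of this interface): walking the
+ * variably sized ptype entries appended after the header and stopping at the
+ * dummy terminator ('info' is assumed to point at the received
+ * struct virtchnl2_get_ptype_info):
+ *
+ *	struct virtchnl2_ptype *ptype = (void *)(info + 1);
+ *	u16 i;
+ *
+ *	for (i = 0; i < le16_to_cpu(info->num_ptypes); i++) {
+ *		if (le16_to_cpu(ptype->ptype_id_10) == 0xFFFF)
+ *			break;
+ *		ptype = (void *)ptype + struct_size(ptype, proto_id,
+ *						    ptype->proto_id_count);
+ *	}
+ */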
+
+/**
+ * struct virtchnl2_vport_stats - Vport statistics.
+ * @vport_id: Vport id.
+ * @pad: Padding.
+ * @rx_bytes: Received bytes.
+ * @rx_unicast: Received unicast packets.
+ * @rx_multicast: Received multicast packets.
+ * @rx_broadcast: Received broadcast packets.
+ * @rx_discards: Discarded packets on receive.
+ * @rx_errors: Receive errors.
+ * @rx_unknown_protocol: Unknown protocol.
+ * @tx_bytes: Transmitted bytes.
+ * @tx_unicast: Transmitted unicast packets.
+ * @tx_multicast: Transmitted multicast packets.
+ * @tx_broadcast: Transmitted broadcast packets.
+ * @tx_discards: Discarded packets on transmit.
+ * @tx_errors: Transmit errors.
+ * @rx_invalid_frame_length: Packets with invalid frame length.
+ * @rx_overflow_drop: Packets dropped on buffer overflow.
+ *
+ * PF/VF sends this message to CP to get the updated stats by specifying the
+ * vport_id. CP responds with stats in struct virtchnl2_vport_stats.
+ *
+ * Associated with VIRTCHNL2_OP_GET_STATS.
+ */
+struct virtchnl2_vport_stats {
+ __le32 vport_id;
+ u8 pad[4];
+ __le64 rx_bytes;
+ __le64 rx_unicast;
+ __le64 rx_multicast;
+ __le64 rx_broadcast;
+ __le64 rx_discards;
+ __le64 rx_errors;
+ __le64 rx_unknown_protocol;
+ __le64 tx_bytes;
+ __le64 tx_unicast;
+ __le64 tx_multicast;
+ __le64 tx_broadcast;
+ __le64 tx_discards;
+ __le64 tx_errors;
+ __le64 rx_invalid_frame_length;
+ __le64 rx_overflow_drop;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(128, virtchnl2_vport_stats);
+
+/**
+ * struct virtchnl2_event - Event info.
+ * @event: Event opcode. See enum virtchnl2_event_codes.
+ * @link_speed: Link speed provided in Mbps.
+ * @vport_id: Vport ID.
+ * @link_status: Link status.
+ * @pad: Padding.
+ * @reserved: Reserved.
+ *
+ * CP sends this message to inform the PF/VF driver of events that may affect
+ * it. No direct response is expected from the driver, though it may generate
+ * other messages in response to this one.
+ *
+ * Associated with VIRTCHNL2_OP_EVENT.
+ */
+struct virtchnl2_event {
+ __le32 event;
+ __le32 link_speed;
+ __le32 vport_id;
+ u8 link_status;
+ u8 pad;
+ __le16 reserved;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_event);
+
+/**
+ * struct virtchnl2_rss_key - RSS key info.
+ * @vport_id: Vport id.
+ * @key_len: Length of RSS key.
+ * @pad: Padding.
+ * @key_flex: RSS hash key, packed bytes.
+ *
+ * PF/VF sends this message to get or set the RSS key. Only supported if both
+ * PF/VF and CP drivers set the VIRTCHNL2_CAP_RSS bit during configuration
+ * negotiation.
+ *
+ * Associated with VIRTCHNL2_OP_GET_RSS_KEY and VIRTCHNL2_OP_SET_RSS_KEY.
+ */
+struct virtchnl2_rss_key {
+ __le32 vport_id;
+ __le16 key_len;
+ u8 pad;
+ __DECLARE_FLEX_ARRAY(u8, key_flex);
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_rss_key);
+
+/**
+ * struct virtchnl2_queue_chunk - chunk of contiguous queues
+ * @type: See enum virtchnl2_queue_type.
+ * @start_queue_id: Starting queue id.
+ * @num_queues: Number of queues.
+ * @pad: Padding for future extensions.
+ */
+struct virtchnl2_queue_chunk {
+ __le32 type;
+ __le32 start_queue_id;
+ __le32 num_queues;
+ u8 pad[4];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
+
+/**
+ * struct virtchnl2_queue_chunks - Chunks of contiguous queues.
+ * @num_chunks: Number of chunks.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues info.
+ */
+struct virtchnl2_queue_chunks {
+ __le16 num_chunks;
+ u8 pad[6];
+ struct virtchnl2_queue_chunk chunks[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
+
+/**
+ * struct virtchnl2_del_ena_dis_queues - Enable/disable queues info.
+ * @vport_id: Vport id.
+ * @pad: Padding.
+ * @chunks: Chunks of contiguous queues info.
+ *
+ * PF sends these messages to enable, disable or delete queues specified in
+ * chunks. PF sends virtchnl2_del_ena_dis_queues struct to specify the queues
+ * to be enabled/disabled/deleted. Also applicable to single queue receive or
+ * transmit. CP performs requested action and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_ENABLE_QUEUES, VIRTCHNL2_OP_DISABLE_QUEUES and
+ * VIRTCHNL2_OP_DEL_QUEUES.
+ */
+struct virtchnl2_del_ena_dis_queues {
+ __le32 vport_id;
+ u8 pad[4];
+ struct virtchnl2_queue_chunks chunks;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_del_ena_dis_queues);
+
+/**
+ * struct virtchnl2_queue_vector - Queue to vector mapping.
+ * @queue_id: Queue id.
+ * @vector_id: Vector id.
+ * @pad: Padding.
+ * @itr_idx: See enum virtchnl2_itr_idx.
+ * @queue_type: See enum virtchnl2_queue_type.
+ * @pad1: Padding for future extensions.
+ */
+struct virtchnl2_queue_vector {
+ __le32 queue_id;
+ __le16 vector_id;
+ u8 pad[2];
+ __le32 itr_idx;
+ __le32 queue_type;
+ u8 pad1[8];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_queue_vector);
+
+/**
+ * struct virtchnl2_queue_vector_maps - Map/unmap queues info.
+ * @vport_id: Vport id.
+ * @num_qv_maps: Number of queue vector maps.
+ * @pad: Padding.
+ * @qv_maps: Queue to vector maps.
+ *
+ * PF sends this message to map or unmap queues to vectors and interrupt
+ * throttling rate index registers. External data buffer contains
+ * virtchnl2_queue_vector_maps structure that contains num_qv_maps of
+ * virtchnl2_queue_vector structures. CP maps the requested queue vector maps
+ * after validating the queue and vector ids and returns a status code.
+ *
+ * Associated with VIRTCHNL2_OP_MAP_QUEUE_VECTOR and
+ * VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR.
+ */
+struct virtchnl2_queue_vector_maps {
+ __le32 vport_id;
+ __le16 num_qv_maps;
+ u8 pad[10];
+ struct virtchnl2_queue_vector qv_maps[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
+
+/**
+ * struct virtchnl2_loopback - Loopback info.
+ * @vport_id: Vport id.
+ * @enable: Enable/disable.
+ * @pad: Padding for future extensions.
+ *
+ * PF/VF sends this message to transition to/from the loopback state. Setting
+ * 'enable' to 1 enables the loopback state and setting 'enable' to 0
+ * disables it. CP configures the requested state and returns status.
+ *
+ * Associated with VIRTCHNL2_OP_LOOPBACK.
+ */
+struct virtchnl2_loopback {
+ __le32 vport_id;
+ u8 enable;
+ u8 pad[3];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_loopback);
+
+/**
+ * struct virtchnl2_mac_addr - MAC address info.
+ * @addr: MAC address.
+ * @type: MAC type. See enum virtchnl2_mac_addr_type.
+ * @pad: Padding for future extensions.
+ */
+struct virtchnl2_mac_addr {
+ u8 addr[ETH_ALEN];
+ u8 type;
+ u8 pad;
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr);
+
+/**
+ * struct virtchnl2_mac_addr_list - List of MAC addresses.
+ * @vport_id: Vport id.
+ * @num_mac_addr: Number of MAC addresses.
+ * @pad: Padding.
+ * @mac_addr_list: List with MAC address info.
+ *
+ * PF/VF driver uses this structure to send a list of MAC addresses to be
+ * added/deleted to the CP; the CP performs the action and returns the
+ * status.
+ *
+ * Associated with VIRTCHNL2_OP_ADD_MAC_ADDR and VIRTCHNL2_OP_DEL_MAC_ADDR.
+ */
+struct virtchnl2_mac_addr_list {
+ __le32 vport_id;
+ __le16 num_mac_addr;
+ u8 pad[2];
+ struct virtchnl2_mac_addr mac_addr_list[];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
+
+/**
+ * struct virtchnl2_promisc_info - Promisc type info.
+ * @vport_id: Vport id.
+ * @flags: See enum virtchnl2_promisc_flags.
+ * @pad: Padding for future extensions.
+ *
+ * PF/VF sends the vport id and flags to the CP; the CP performs the action
+ * and returns the status.
+ *
+ * Associated with VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE.
+ */
+struct virtchnl2_promisc_info {
+ __le32 vport_id;
+ /* See enum virtchnl2_promisc_flags */
+ __le16 flags;
+ u8 pad[2];
+};
+VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_promisc_info);
+
+#endif /* _VIRTCHNL2_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h
new file mode 100644
index 000000000000..f1b577f1c452
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2_lan_desc.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2023 Intel Corporation */
+
+#ifndef _VIRTCHNL2_LAN_DESC_H_
+#define _VIRTCHNL2_LAN_DESC_H_
+
+#include <linux/bits.h>
+
+/* This is an interface definition file where existing enums and their values
+ * must remain unchanged over time, so we specify explicit values for all enums.
+ */
+
+/* Transmit descriptor ID flags */
+enum virtchnl2_tx_desc_ids {
+ VIRTCHNL2_TXDID_DATA = BIT(0),
+ VIRTCHNL2_TXDID_CTX = BIT(1),
+ /* TXDID bit 2 is reserved
+ * TXDID bit 3 is free for future use
+ * TXDID bit 4 is reserved
+ */
+ VIRTCHNL2_TXDID_FLEX_TSO_CTX = BIT(5),
+ /* TXDID bit 6 is reserved */
+ VIRTCHNL2_TXDID_FLEX_L2TAG1_L2TAG2 = BIT(7),
+ /* TXDID bits 8 and 9 are free for future use
+ * TXDID bit 10 is reserved
+ * TXDID bit 11 is free for future use
+ */
+ VIRTCHNL2_TXDID_FLEX_FLOW_SCHED = BIT(12),
+ /* TXDID bits 13 and 14 are free for future use */
+ VIRTCHNL2_TXDID_DESC_DONE = BIT(15),
+};
+
+/* Receive descriptor IDs */
+enum virtchnl2_rx_desc_ids {
+ VIRTCHNL2_RXDID_1_32B_BASE = 1,
+ /* FLEX_SQ_NIC and FLEX_SPLITQ share desc ids because they can be
+ * differentiated based on queue model; e.g. single queue model can
+ * only use FLEX_SQ_NIC and split queue model can only use FLEX_SPLITQ
+ * for DID 2.
+ */
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ = 2,
+ VIRTCHNL2_RXDID_2_FLEX_SQ_NIC = VIRTCHNL2_RXDID_2_FLEX_SPLITQ,
+ /* 3 through 6 are reserved */
+ VIRTCHNL2_RXDID_7_HW_RSVD = 7,
+ /* 8 through 15 are free */
+};
+
+/* Receive descriptor ID bitmasks */
+#define VIRTCHNL2_RXDID_M(bit) BIT_ULL(VIRTCHNL2_RXDID_##bit)
+
+enum virtchnl2_rx_desc_id_bitmasks {
+ VIRTCHNL2_RXDID_1_32B_BASE_M = VIRTCHNL2_RXDID_M(1_32B_BASE),
+ VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M = VIRTCHNL2_RXDID_M(2_FLEX_SPLITQ),
+ VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M = VIRTCHNL2_RXDID_M(2_FLEX_SQ_NIC),
+ VIRTCHNL2_RXDID_7_HW_RSVD_M = VIRTCHNL2_RXDID_M(7_HW_RSVD),
+};
+
+/* For splitq virtchnl2_rx_flex_desc_adv_nic_3 desc members */
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M GENMASK(3, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_UMBCAST_M GENMASK(7, 6)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M GENMASK(9, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S 12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF0_M GENMASK(15, 13)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M GENMASK(13, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S 14
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S 15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M GENMASK(9, 0)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S 10
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S 11
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_SPH_S)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_S 12
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_FF1_M GENMASK(14, 12)
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S 15
+#define VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_M \
+ BIT_ULL(VIRTCHNL2_RX_FLEX_DESC_ADV_MISS_S)
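+
+/* Illustrative extraction sketch (not part of this interface): with
+ * FIELD_GET() from <linux/bitfield.h>, and 'qword' assumed to be the
+ * little-endian 16-bit word carrying the buffer length and GEN bit:
+ *
+ *	u16 len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
+ *			    le16_to_cpu(qword));
+ *	u16 gen = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M,
+ *			    le16_to_cpu(qword));
+ */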
+
+/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */
+enum virtchl2_rx_flex_desc_adv_status_error_0_qw1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_M = BIT(0),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M = BIT(1),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_M = BIT(7),
+};
+
+/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */
+enum virtchnl2_rx_flex_desc_adv_status_error_0_qw0_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_LPBK_M = BIT(0),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M = BIT(1),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RXE_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_CRCP_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L2TAG1P_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD0_VALID_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XTRMD1_VALID_M = BIT(7),
+};
+
+/* Bitmasks for splitq virtchnl2_rx_flex_desc_adv_nic_3 */
+enum virtchnl2_rx_flex_desc_adv_status_error_1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_RSVD_M = GENMASK(1, 0),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_ATRAEFAIL_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_L2TAG2P_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD2_VALID_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD3_VALID_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD4_VALID_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS1_XTRMD5_VALID_M = BIT(7),
+};
+
+/* For singleq (flex) virtchnl2_rx_flex_desc fields
+ * For virtchnl2_rx_flex_desc.ptype_flex_flags0 member
+ */
+#define VIRTCHNL2_RX_FLEX_DESC_PTYPE_M GENMASK(9, 0)
+
+/* For virtchnl2_rx_flex_desc.pkt_len member */
+#define VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M GENMASK(13, 0)
+
+/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */
+enum virtchnl2_rx_flex_desc_status_error_0_bits {
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_DD_M = BIT(0),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_EOF_M = BIT(1),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_HBO_M = BIT(2),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_L3L4P_M = BIT(3),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_IPE_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_L4E_M = BIT(5),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EIPE_M = BIT(6),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_M = BIT(7),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_LPBK_M = BIT(8),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_IPV6EXADD_M = BIT(9),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_RXE_M = BIT(10),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_CRCP_M = BIT(11),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M = BIT(12),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_L2TAG1P_M = BIT(13),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_M = BIT(14),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_M = BIT(15),
+};
+
+/* Bitmasks for singleq (flex) virtchnl2_rx_flex_desc */
+enum virtchnl2_rx_flex_desc_status_error_1_bits {
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_CPM_M = GENMASK(3, 0),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M = BIT(4),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_CRYPTO_M = BIT(5),
+ /* [10:6] reserved */
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_L2TAG2P_M = BIT(11),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_M = BIT(12),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_M = BIT(13),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_M = BIT(14),
+ VIRTCHNL2_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_M = BIT(15),
+};
+
+/* For virtchnl2_rx_flex_desc.ts_low member */
+#define VIRTCHNL2_RX_FLEX_TSTAMP_VALID BIT(0)
+
+/* For singleq (non flex) virtchnl2_singleq_base_rx_desc legacy desc members */
+#define VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M GENMASK_ULL(51, 38)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M GENMASK_ULL(37, 30)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M GENMASK_ULL(26, 19)
+#define VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M GENMASK_ULL(18, 0)
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+enum virtchnl2_rx_base_desc_status_bits {
+ VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M = BIT(0),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M = BIT(1),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_L2TAG1P_M = BIT(2),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_L3L4P_M = BIT(3),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_CRCP_M = BIT(4),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD_M = GENMASK(7, 5),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_EXT_UDP_0_M = BIT(8),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_UMBCAST_M = GENMASK(10, 9),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_FLM_M = BIT(11),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_FLTSTAT_M = GENMASK(13, 12),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_LPBK_M = BIT(14),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M = BIT(15),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_RSVD1_M = GENMASK(17, 16),
+ VIRTCHNL2_RX_BASE_DESC_STATUS_INT_UDP_0_M = BIT(18),
+};
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+enum virtchnl2_rx_base_desc_error_bits {
+ VIRTCHNL2_RX_BASE_DESC_ERROR_RXE_M = BIT(0),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_ATRAEFAIL_M = BIT(1),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_HBO_M = BIT(2),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L3L4E_M = GENMASK(5, 3),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_IPE_M = BIT(3),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_L4E_M = BIT(4),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_EIPE_M = BIT(5),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_OVERSIZE_M = BIT(6),
+ VIRTCHNL2_RX_BASE_DESC_ERROR_PPRS_M = BIT(7),
+};
+
+/* Bitmasks for singleq (base) virtchnl2_rx_base_desc */
+#define VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M GENMASK(13, 12)
+
+/**
+ * struct virtchnl2_splitq_rx_buf_desc - SplitQ RX buffer descriptor format
+ * @qword0: RX buffer struct.
+ * @qword0.buf_id: Buffer identifier.
+ * @qword0.rsvd0: Reserved.
+ * @qword0.rsvd1: Reserved.
+ * @pkt_addr: Packet buffer address.
+ * @hdr_addr: Header buffer address.
+ * @rsvd2: Reserved.
+ *
+ * Receive Descriptors
+ * SplitQ buffer
+ * | 16| 0|
+ * ----------------------------------------------------------------
+ * | RSV | Buffer ID |
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | 0|
+ */
+struct virtchnl2_splitq_rx_buf_desc {
+ struct {
+ __le16 buf_id;
+ __le16 rsvd0;
+ __le32 rsvd1;
+ } qword0;
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ __le64 rsvd2;
+};
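Illustration only (not part of the patch): a buffer refill path would typically fill this splitq buffer descriptor with little-endian stores before handing it to hardware. A minimal hedged sketch, with a made-up helper name and caller-provided DMA handles:

/* Hypothetical refill helper: post one packet/header buffer pair. */
static void post_splitq_rx_buf(struct virtchnl2_splitq_rx_buf_desc *desc,
			       u16 buf_id, dma_addr_t pkt_dma,
			       dma_addr_t hdr_dma)
{
	desc->qword0.buf_id = cpu_to_le16(buf_id);
	desc->qword0.rsvd0 = 0;
	desc->qword0.rsvd1 = 0;
	desc->pkt_addr = cpu_to_le64(pkt_dma);
	desc->hdr_addr = cpu_to_le64(hdr_dma);
	desc->rsvd2 = 0;
}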
+
+/**
+ * struct virtchnl2_singleq_rx_buf_desc - SingleQ RX buffer descriptor format.
+ * @pkt_addr: Packet buffer address.
+ * @hdr_addr: Header buffer address.
+ * @rsvd1: Reserved.
+ * @rsvd2: Reserved.
+ *
+ * SingleQ buffer
+ * | 0|
+ * ----------------------------------------------------------------
+ * | Rx packet buffer address |
+ * ----------------------------------------------------------------
+ * | Rx header buffer address |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | RSV |
+ * ----------------------------------------------------------------
+ * | 0|
+ */
+struct virtchnl2_singleq_rx_buf_desc {
+ __le64 pkt_addr;
+ __le64 hdr_addr;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+/**
+ * struct virtchnl2_singleq_base_rx_desc - RX descriptor writeback format.
+ * @qword0: First quad word struct.
+ * @qword0.lo_dword: Lower dual word struct.
+ * @qword0.lo_dword.mirroring_status: Mirrored packet status.
+ * @qword0.lo_dword.l2tag1: Stripped L2 tag from the received packet.
+ * @qword0.hi_dword: High dual word union.
+ * @qword0.hi_dword.rss: RSS hash.
+ * @qword0.hi_dword.fd_id: Flow director filter id.
+ * @qword1: Second quad word struct.
+ * @qword1.status_error_ptype_len: Status/error/PTYPE/length.
+ * @qword2: Third quad word struct.
+ * @qword2.ext_status: Extended status.
+ * @qword2.rsvd: Reserved.
+ * @qword2.l2tag2_1: Extracted L2 tag 2 from the packet.
+ * @qword2.l2tag2_2: Reserved.
+ * @qword3: Fourth quad word struct.
+ * @qword3.reserved: Reserved.
+ * @qword3.fd_id: Flow director filter id.
+ *
+ * Profile ID 0x1, SingleQ, base writeback format
+ */
+struct virtchnl2_singleq_base_rx_desc {
+ struct {
+ struct {
+ __le16 mirroring_status;
+ __le16 l2tag1;
+ } lo_dword;
+ union {
+ __le32 rss;
+ __le32 fd_id;
+ } hi_dword;
+ } qword0;
+ struct {
+ __le64 status_error_ptype_len;
+ } qword1;
+ struct {
+ __le16 ext_status;
+ __le16 rsvd;
+ __le16 l2tag2_1;
+ __le16 l2tag2_2;
+ } qword2;
+ struct {
+ __le32 reserved;
+ __le32 fd_id;
+ } qword3;
+};
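For illustration (not in the patch): the QW1_* masks defined above apply to the 64-bit status_error_ptype_len word of this descriptor after byte-swapping, typically via FIELD_GET() from <linux/bitfield.h>. A hedged sketch with made-up local names:

/* Hypothetical decode of a completed base writeback descriptor. */
static void decode_base_qword1(const struct virtchnl2_singleq_base_rx_desc *desc)
{
	u64 qw1 = le64_to_cpu(desc->qword1.status_error_ptype_len);
	u32 status = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_STATUS_M, qw1);
	u16 pkt_len = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_LEN_PBUF_M, qw1);
	u16 ptype = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_QW1_PTYPE_M, qw1);

	if (status & VIRTCHNL2_RX_BASE_DESC_STATUS_DD_M) {
		/* descriptor is complete: pkt_len and ptype are valid here */
	}
}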
+
+/**
+ * struct virtchnl2_rx_flex_desc_nic - RX descriptor writeback format.
+ *
+ * @rxdid: Descriptor builder profile id.
+ * @mir_id_umb_cast: umb_cast=[7:6], mirror=[5:0]
+ * @ptype_flex_flags0: ff0=[15:10], ptype=[9:0]
+ * @pkt_len: Packet length, [15:14] are reserved.
+ * @hdr_len_sph_flex_flags1: ff1/ext=[15:12], sph=[11], header=[10:0].
+ * @status_error0: Status/Error section 0.
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @rss_hash: RSS hash.
+ * @status_error1: Status/Error section 1.
+ * @flexi_flags2: Flexible flags section 2.
+ * @ts_low: Lower word of timestamp value.
+ * @l2tag2_1st: First L2TAG2.
+ * @l2tag2_2nd: Second L2TAG2.
+ * @flow_id: Flow id.
+ * @flex_ts: Timestamp and flexible flow id union.
+ * @flex_ts.ts_high: Timestamp higher word of the timestamp value.
+ * @flex_ts.flex.rsvd: Reserved.
+ * @flex_ts.flex.flow_id_ipv6: IPv6 flow id.
+ *
+ * Profile ID 0x2, SingleQ, flex writeback format
+ */
+struct virtchnl2_rx_flex_desc_nic {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flex_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+ /* Qword 3 */
+ __le32 flow_id;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
+/**
+ * struct virtchnl2_rx_flex_desc_adv_nic_3 - RX descriptor writeback format.
+ * @rxdid_ucast: ucast=[7:6], rsvd=[5:4], profile_id=[3:0].
+ * @status_err0_qw0: Status/Error section 0 in quad word 0.
+ * @ptype_err_fflags0: ff0=[15:12], udp_len_err=[11], ip_hdr_err=[10],
+ * ptype=[9:0].
+ * @pktlen_gen_bufq_id: bufq_id=[15] only in splitq, gen=[14] only in splitq,
+ * plen=[13:0].
+ * @hdrlen_flags: miss_prepend=[15], trunc_mirr=[14], int_udp_0=[13],
+ * ext_udp0=[12], sph=[11] only in splitq, rsc=[10]
+ * only in splitq, header=[9:0].
+ * @status_err0_qw1: Status/Error section 0 in quad word 1.
+ * @status_err1: Status/Error section 1.
+ * @fflags1: Flexible flags section 1.
+ * @ts_low: Lower word of timestamp value.
+ * @buf_id: Buffer identifier. Only in splitq mode.
+ * @misc: Union.
+ * @misc.raw_cs: Raw checksum.
+ * @misc.l2tag1: Stripped L2 tag from the received packet
+ * @misc.rscseglen: RSC segment length.
+ * @hash1: Lower bits of Rx hash value.
+ * @ff2_mirrid_hash2: Union.
+ * @ff2_mirrid_hash2.fflags2: Flexible flags section 2.
+ * @ff2_mirrid_hash2.mirrorid: Mirror id.
+ * @ff2_mirrid_hash2.hash2: Hash value bits [23:16].
+ * @hash3: Upper bits of Rx hash value.
+ * @l2tag2: Extracted L2 tag 2 from the packet.
+ * @fmd4: Flexible metadata container 4.
+ * @l2tag1: Stripped L2 tag from the received packet
+ * @fmd6: Flexible metadata container 6.
+ * @ts_high: Timestamp higher word of the timestamp value.
+ *
+ * Profile ID 0x2, SplitQ, flex writeback format
+ *
+ * Flex-field 0: BufferID
+ * Flex-field 1: Raw checksum/L2TAG1/RSC Seg Len (determined by HW)
+ * Flex-field 2: Hash[15:0]
+ * Flex-flags 2: Hash[23:16]
+ * Flex-field 3: L2TAG2
+ * Flex-field 5: L2TAG1
+ * Flex-field 7: Timestamp (upper 32 bits)
+ */
+struct virtchnl2_rx_flex_desc_adv_nic_3 {
+ /* Qword 0 */
+ u8 rxdid_ucast;
+ u8 status_err0_qw0;
+ __le16 ptype_err_fflags0;
+ __le16 pktlen_gen_bufq_id;
+ __le16 hdrlen_flags;
+ /* Qword 1 */
+ u8 status_err0_qw1;
+ u8 status_err1;
+ u8 fflags1;
+ u8 ts_low;
+ __le16 buf_id;
+ union {
+ __le16 raw_cs;
+ __le16 l2tag1;
+ __le16 rscseglen;
+ } misc;
+ /* Qword 2 */
+ __le16 hash1;
+ union {
+ u8 fflags2;
+ u8 mirrorid;
+ u8 hash2;
+ } ff2_mirrid_hash2;
+ u8 hash3;
+ __le16 l2tag2;
+ __le16 fmd4;
+ /* Qword 3 */
+ __le16 l2tag1;
+ __le16 fmd6;
+ __le32 ts_high;
+};
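Illustration only: the splitq status/error enums earlier in this header are tested against the byte-wide status fields of this layout; a completion check might look like the hedged sketch below (the helper name is hypothetical):

static bool splitq_rx_desc_done(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
	u8 status = rx_desc->status_err0_qw1;

	/* done and end-of-frame bits live in status/error 0, qword 1 */
	return (status & VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_DD_M) &&
	       (status & VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M);
}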
+
+/* Common union for accessing descriptor format structs */
+union virtchnl2_rx_desc {
+ struct virtchnl2_singleq_base_rx_desc base_wb;
+ struct virtchnl2_rx_flex_desc_nic flex_nic_wb;
+ struct virtchnl2_rx_flex_desc_adv_nic_3 flex_adv_nic_3_wb;
+};
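Not part of the patch: the union lets one ring array hold any of the three writeback layouts; which member is valid follows from the descriptor profile the driver negotiated (profile 0x1 is the base singleq format, 0x2 the flex formats, per the comments above). A hedged sketch of a consumer, with hypothetical process_*() helpers:

static void handle_rx_desc(const union virtchnl2_rx_desc *rx_desc, bool splitq)
{
	if (splitq)
		process_splitq(&rx_desc->flex_adv_nic_3_wb);	/* splitq flex */
	else
		process_base(&rx_desc->base_wb);		/* singleq base */
}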
+
+#endif /* _VIRTCHNL_LAN_DESC_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 4ee849985e2b..16d2a55d5e17 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2356,10 +2356,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
break;
case ETH_SS_STATS:
for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
igb_gstrings_stats[i].stat_string);
for (i = 0; i < IGB_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
igb_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 76b34cee1da3..b2295caa2f0a 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3069,6 +3069,7 @@ void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct e1000_fw_version fw;
+ char *lbuf;
igb_get_fw_version(hw, &fw);
@@ -3076,36 +3077,34 @@ void igb_set_fw_version(struct igb_adapter *adapter)
case e1000_i210:
case e1000_i211:
if (!(igb_get_flash_presence_i210(hw))) {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor,
- fw.invm_img_type);
+ lbuf = kasprintf(GFP_KERNEL, "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
break;
}
fallthrough;
default:
- /* if option is rom valid, display its version too */
+ /* if option rom is valid, display its version too */
if (fw.or_valid) {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x, %d.%d.%d",
- fw.eep_major, fw.eep_minor, fw.etrack_id,
- fw.or_major, fw.or_build, fw.or_patch);
+ lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor,
+ fw.etrack_id, fw.or_major, fw.or_build,
+ fw.or_patch);
/* no option rom */
} else if (fw.etrack_id != 0X0000) {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
+ lbuf = kasprintf(GFP_KERNEL, "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor,
+ fw.etrack_id);
} else {
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d.%d",
- fw.eep_major, fw.eep_minor, fw.eep_build);
+ lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d", fw.eep_major,
+ fw.eep_minor, fw.eep_build);
}
break;
}
+
+ /* truncation happens here if the string doesn't fit */
+ strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
+ kfree(lbuf);
}
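Not part of the patch, just the shape of the new pattern: building the version string with kasprintf() and then copying it with strscpy() concentrates any truncation in one well-defined place instead of spreading bounded snprintf() calls across every branch. A minimal hedged sketch (the NULL check is added for illustration, since kasprintf() can fail):

	char *lbuf = kasprintf(GFP_KERNEL, "%d.%d.%d",
			       fw.eep_major, fw.eep_minor, fw.eep_build);

	if (lbuf) {
		/* strscpy() truncates as needed and always NUL-terminates */
		strscpy(adapter->fw_version, lbuf, sizeof(adapter->fw_version));
		kfree(lbuf);
	}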
/**
@@ -3264,7 +3263,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
igb_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = pci_resource_start(pdev, 0);
netdev->mem_end = pci_resource_end(pdev, 0);
@@ -7857,8 +7856,8 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
{
struct pci_dev *pdev = adapter->pdev;
struct vf_data_storage *vf_data = &adapter->vf_data[vf];
- struct list_head *pos;
- struct vf_mac_filter *entry = NULL;
+ struct vf_mac_filter *entry;
+ bool found = false;
int ret = 0;
if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
@@ -7878,8 +7877,7 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
switch (info) {
case E1000_VF_MAC_FILTER_CLR:
/* remove all unicast MAC filters related to the current VF */
- list_for_each(pos, &adapter->vf_macs.l) {
- entry = list_entry(pos, struct vf_mac_filter, l);
+ list_for_each_entry(entry, &adapter->vf_macs.l, l) {
if (entry->vf == vf) {
entry->vf = -1;
entry->free = true;
@@ -7889,13 +7887,14 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
break;
case E1000_VF_MAC_FILTER_ADD:
/* try to find empty slot in the list */
- list_for_each(pos, &adapter->vf_macs.l) {
- entry = list_entry(pos, struct vf_mac_filter, l);
- if (entry->free)
+ list_for_each_entry(entry, &adapter->vf_macs.l, l) {
+ if (entry->free) {
+ found = true;
break;
+ }
}
- if (entry && entry->free) {
+ if (found) {
entry->free = false;
entry->vf = vf;
ether_addr_copy(entry->vf_mac, addr);
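Background for this conversion (not part of the patch): with list_for_each_entry() the cursor is never NULL after a full traversal, it simply points at the list head container, so the old post-loop test "entry && entry->free" is replaced by a flag set inside the loop:

	struct vf_mac_filter *entry;
	bool found = false;

	list_for_each_entry(entry, &adapter->vf_macs.l, l) {
		if (entry->free) {
			found = true;	/* stopped on a free slot */
			break;
		}
	}
	if (found) {
		entry->free = false;
		entry->vf = vf;
		ether_addr_copy(entry->vf_mac, addr);
	}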
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 7ff2752dd763..fd712585af27 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2785,7 +2785,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
igbvf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
adapter->bd_number = cards_found++;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index dd8a9d27a167..785eaa8e0ba8 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -773,9 +773,10 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
break;
case ETH_SS_STATS:
for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string);
+ ethtool_sprintf(&p, "%s",
+ igc_gstrings_stats[i].stat_string);
for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 98de34d0ce07..e9bb403bbacf 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6935,7 +6935,7 @@ static int igc_probe(struct pci_dev *pdev,
*/
igc_get_hw_control(adapter);
- strncpy(netdev->name, "eth%d", IFNAMSIZ);
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
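Aside, not part of the patch: unlike strncpy(), strscpy() always NUL-terminates the destination (and returns -E2BIG on truncation), so the usual "sizeof(...) - 1" dance disappears:

	/* old: may leave netdev->name without a terminating NUL */
	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	/* new: copies at most size - 1 bytes and always terminates */
	strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));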
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0bbad4a5cc2f..4dd897806fa5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1413,11 +1413,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < IXGBE_TEST_LEN; i++)
- ethtool_sprintf(&p, ixgbe_gstrings_test[i]);
+ ethtool_sprintf(&p, "%s", ixgbe_gstrings_test[i]);
break;
case ETH_SS_STATS:
for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
ixgbe_gstrings_stats[i].stat_string);
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index dd03b017dfc5..94bde2cad0f4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2421,7 +2421,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
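Context (not part of the patch): xdp_do_flush_map() is the old name for xdp_do_flush(); the call still belongs once at the end of the NAPI poll, after every frame redirected with XDP_REDIRECT has been queued, so deferred map/device flushes happen per poll rather than per packet:

	/* end of the Rx poll loop */
	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush();		/* flush deferred XDP_REDIRECT work */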
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index ea88ac04ab9a..9cfdfa8a4355 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -640,12 +640,11 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
struct vf_macvlans *entry;
- struct list_head *pos;
+ bool found = false;
int retval = 0;
if (index <= 1) {
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
+ list_for_each_entry(entry, &adapter->vf_mvs.l, l) {
if (entry->vf == vf) {
entry->vf = -1;
entry->free = true;
@@ -663,23 +662,22 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
if (!index)
return 0;
- entry = NULL;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (entry->free)
+ list_for_each_entry(entry, &adapter->vf_mvs.l, l) {
+ if (entry->free) {
+ found = true;
break;
+ }
}
/*
* If we traversed the entire list and didn't find a free entry
- * then we're out of space on the RAR table. Also entry may
- * be NULL because the original memory allocation for the list
- * failed, which is not fatal but does mean we can't support
- * VF requests for MACVLAN because we couldn't allocate
- * memory for the list management required.
+ * then we're out of space on the RAR table. It's also possible
+ * for the &adapter->vf_mvs.l list to be empty because the original
+ * memory allocation for the list failed, which is not fatal but does
+ * mean we can't support VF requests for MACVLAN because we couldn't
+ * allocate memory for the list management required.
*/
- if (!entry || !entry->free)
+ if (!found)
return -ENOSPC;
retval = ixgbe_add_mac_filter(adapter, mac_addr, vf);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 1703c640a434..59798bc33298 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -351,7 +351,7 @@ construct_skb:
}
if (xdp_xmit & IXGBE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 5f6ae11212ae..81cf3361a1e5 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1380,13 +1380,11 @@ static int korina_probe(struct platform_device *pdev)
return rc;
}
-static int korina_remove(struct platform_device *pdev)
+static void korina_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
-
- return 0;
}
#ifdef CONFIG_OF
@@ -1405,7 +1403,7 @@ static struct platform_driver korina_driver = {
.of_match_table = of_match_ptr(korina_match),
},
.probe = korina_probe,
- .remove = korina_remove,
+ .remove_new = korina_remove,
};
module_platform_driver(korina_driver);
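Background (not part of the patch): .remove_new is the transitional platform-driver remove callback that returns void, since the int returned by .remove was ignored by the driver core anyway. A converted driver ends up shaped like this hedged sketch (foo_probe() is assumed to exist elsewhere):

static void foo_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	/* nothing to return: errors here could not be reported anyway */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver		= {
		.name	= "foo",
	},
};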
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index f5961bdcc480..1d5b7bb6380f 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -721,8 +721,7 @@ err_out:
return err;
}
-static int
-ltq_etop_remove(struct platform_device *pdev)
+static void ltq_etop_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -732,11 +731,10 @@ ltq_etop_remove(struct platform_device *pdev)
ltq_etop_mdio_cleanup(dev);
unregister_netdev(dev);
}
- return 0;
}
static struct platform_driver ltq_mii_driver = {
- .remove = ltq_etop_remove,
+ .remove_new = ltq_etop_remove,
.driver = {
.name = "ltq_etop",
},
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8d646c7f8c82..8bd4def3622e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -641,7 +641,7 @@ err_uninit_dma:
return err;
}
-static int xrx200_remove(struct platform_device *pdev)
+static void xrx200_remove(struct platform_device *pdev)
{
struct xrx200_priv *priv = platform_get_drvdata(pdev);
struct net_device *net_dev = priv->net_dev;
@@ -659,8 +659,6 @@ static int xrx200_remove(struct platform_device *pdev)
/* shut down hardware */
xrx200_hw_cleanup(priv);
-
- return 0;
}
static const struct of_device_id xrx200_match[] = {
@@ -671,7 +669,7 @@ MODULE_DEVICE_TABLE(of, xrx200_match);
static struct platform_driver xrx200_driver = {
.probe = xrx200_probe,
- .remove = xrx200_remove,
+ .remove_new = xrx200_remove,
.driver = {
.name = "lantiq,xrx200-net",
.of_match_table = xrx200_match,
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index ffa96059079c..5182fe737c37 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -294,13 +294,11 @@ static int liteeth_probe(struct platform_device *pdev)
return 0;
}
-static int liteeth_remove(struct platform_device *pdev)
+static void liteeth_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
unregister_netdev(netdev);
-
- return 0;
}
static const struct of_device_id liteeth_of_match[] = {
@@ -311,7 +309,7 @@ MODULE_DEVICE_TABLE(of, liteeth_of_match);
static struct platform_driver liteeth_driver = {
.probe = liteeth_probe,
- .remove = liteeth_remove,
+ .remove_new = liteeth_remove,
.driver = {
.name = DRV_NAME,
.of_match_table = liteeth_of_match,
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 3b129a1c3381..f0bdc06d253d 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2892,19 +2892,18 @@ err_put_clk:
return ret;
}
-static int mv643xx_eth_shared_remove(struct platform_device *pdev)
+static void mv643xx_eth_shared_remove(struct platform_device *pdev)
{
struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
mv643xx_eth_shared_of_remove();
if (!IS_ERR(msp->clk))
clk_disable_unprepare(msp->clk);
- return 0;
}
static struct platform_driver mv643xx_eth_shared_driver = {
.probe = mv643xx_eth_shared_probe,
- .remove = mv643xx_eth_shared_remove,
+ .remove_new = mv643xx_eth_shared_remove,
.driver = {
.name = MV643XX_ETH_SHARED_NAME,
.of_match_table = of_match_ptr(mv643xx_eth_shared_ids),
@@ -3279,7 +3278,7 @@ out:
return err;
}
-static int mv643xx_eth_remove(struct platform_device *pdev)
+static void mv643xx_eth_remove(struct platform_device *pdev)
{
struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
struct net_device *dev = mp->dev;
@@ -3293,8 +3292,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
clk_disable_unprepare(mp->clk);
free_netdev(mp->dev);
-
- return 0;
}
static void mv643xx_eth_shutdown(struct platform_device *pdev)
@@ -3311,7 +3308,7 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
static struct platform_driver mv643xx_eth_driver = {
.probe = mv643xx_eth_probe,
- .remove = mv643xx_eth_remove,
+ .remove_new = mv643xx_eth_remove,
.shutdown = mv643xx_eth_shutdown,
.driver = {
.name = MV643XX_ETH_NAME,
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 674913184ebf..89f26402f8fb 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -388,7 +388,7 @@ out_clk:
return ret;
}
-static int orion_mdio_remove(struct platform_device *pdev)
+static void orion_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
struct orion_mdio_dev *dev = bus->priv;
@@ -404,8 +404,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
clk_disable_unprepare(dev->clk[i]);
clk_put(dev->clk[i]);
}
-
- return 0;
}
static const struct of_device_id orion_mdio_match[] = {
@@ -426,7 +424,7 @@ MODULE_DEVICE_TABLE(acpi, orion_mdio_acpi_match);
static struct platform_driver orion_mdio_driver = {
.probe = orion_mdio_probe,
- .remove = orion_mdio_remove,
+ .remove_new = orion_mdio_remove,
.driver = {
.name = "orion-mdio",
.of_match_table = orion_mdio_match,
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index d483b8c00ec0..90817136808d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2520,7 +2520,7 @@ next:
mvneta_xdp_put_buff(pp, rxq, &xdp_buf, -1);
if (ps.xdp_redirect)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets)
mvneta_update_stats(pp, &ps);
@@ -5725,7 +5725,7 @@ err_free_irq:
}
/* Device removal routine */
-static int mvneta_remove(struct platform_device *pdev)
+static void mvneta_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct mvneta_port *pp = netdev_priv(dev);
@@ -5744,8 +5744,6 @@ static int mvneta_remove(struct platform_device *pdev)
1 << pp->id);
mvneta_bm_put(pp->bm_priv);
}
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -5871,7 +5869,7 @@ MODULE_DEVICE_TABLE(of, mvneta_match);
static struct platform_driver mvneta_driver = {
.probe = mvneta_probe,
- .remove = mvneta_remove,
+ .remove_new = mvneta_remove,
.driver = {
.name = MVNETA_DRIVER_NAME,
.of_match_table = mvneta_match,
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 46c942ef2287..3f46a0fed048 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -457,7 +457,7 @@ err_clk:
return err;
}
-static int mvneta_bm_remove(struct platform_device *pdev)
+static void mvneta_bm_remove(struct platform_device *pdev)
{
struct mvneta_bm *priv = platform_get_drvdata(pdev);
u8 all_ports_map = 0xff;
@@ -475,8 +475,6 @@ static int mvneta_bm_remove(struct platform_device *pdev)
mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);
clk_disable_unprepare(priv->clk);
-
- return 0;
}
static const struct of_device_id mvneta_bm_match[] = {
@@ -487,7 +485,7 @@ MODULE_DEVICE_TABLE(of, mvneta_bm_match);
static struct platform_driver mvneta_bm_driver = {
.probe = mvneta_bm_probe,
- .remove = mvneta_bm_remove,
+ .remove_new = mvneta_bm_remove,
.driver = {
.name = MVNETA_BM_DRIVER_NAME,
.of_match_table = mvneta_bm_match,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 21c3f9b015c8..93137606869e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -4027,7 +4027,7 @@ err_drop_frame:
}
if (xdp_ret & MVPP2_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (ps.rx_packets) {
struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
@@ -5831,7 +5831,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
v->type = MVPP2_QUEUE_VECTOR_SHARED;
if (port->flags & MVPP2_F_DT_COMPAT)
- strncpy(irqname, "rx-shared", sizeof(irqname));
+ strscpy(irqname, "rx-shared", sizeof(irqname));
}
if (port_node)
@@ -7662,7 +7662,7 @@ err_pp_clk:
return err;
}
-static int mvpp2_remove(struct platform_device *pdev)
+static void mvpp2_remove(struct platform_device *pdev)
{
struct mvpp2 *priv = platform_get_drvdata(pdev);
struct fwnode_handle *fwnode = pdev->dev.fwnode;
@@ -7700,15 +7700,13 @@ static int mvpp2_remove(struct platform_device *pdev)
}
if (is_acpi_node(port_fwnode))
- return 0;
+ return;
clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(priv->mg_core_clk);
clk_disable_unprepare(priv->mg_clk);
clk_disable_unprepare(priv->pp_clk);
clk_disable_unprepare(priv->gop_clk);
-
- return 0;
}
static const struct of_device_id mvpp2_match[] = {
@@ -7734,7 +7732,7 @@ MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
- .remove = mvpp2_remove,
+ .remove_new = mvpp2_remove,
.driver = {
.name = MVPP2_DRIVER_NAME,
.of_match_table = mvpp2_match,
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
index 90c3a419932d..d4ee2454675b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c
@@ -16,9 +16,6 @@
#define CTRL_MBOX_MAX_PF 128
#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
-#define FW_HB_INTERVAL_IN_SECS 1
-#define FW_HB_MISS_COUNT 10
-
/* Names of Hardware non-queue generic interrupts */
static char *cn93_non_ioq_msix_names[] = {
"epf_ire_rint",
@@ -250,12 +247,11 @@ static void octep_init_config_cn93_pf(struct octep_device *oct)
link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
}
conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
- (0x400000ull * 7) +
+ CN93_PEM_BAR4_INDEX_OFFSET +
(link * CTRL_MBOX_SZ);
- conf->hb_interval = FW_HB_INTERVAL_IN_SECS;
- conf->max_hb_miss_cnt = FW_HB_MISS_COUNT;
-
+ conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
+ conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
}
/* Setup registers for a hardware Tx Queue */
@@ -373,34 +369,40 @@ static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)
mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no);
}
-/* Process non-ioq interrupts required to keep pf interface running.
- * OEI_RINT is needed for control mailbox
- */
-static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
-{
- bool handled = false;
- u64 reg0;
-
- /* Check for OEI INTR */
- reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
- if (reg0) {
- dev_info(&oct->pdev->dev,
- "Received OEI_RINT intr: 0x%llx\n",
- reg0);
- octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0);
- if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
+/* Poll OEI events like heartbeat */
+static void octep_poll_oei_cn93_pf(struct octep_device *oct)
+{
+ u64 reg;
+
+ reg = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT);
+ if (reg) {
+ octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg);
+ if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
queue_work(octep_wq, &oct->ctrl_mbox_task);
- else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
+ else if (reg & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
atomic_set(&oct->hb_miss_cnt, 0);
-
- handled = true;
}
+}
+
+/* OEI interrupt handler */
+static irqreturn_t octep_oei_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+
+ octep_poll_oei_cn93_pf(oct);
+ return IRQ_HANDLED;
+}
- return handled;
+/* Process non-ioq interrupts required to keep pf interface running.
+ * OEI_RINT is needed for control mailbox
+ */
+static void octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct)
+{
+ octep_poll_oei_cn93_pf(oct);
}
-/* Interrupts handler for all non-queue generic interrupts. */
-static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
+/* Interrupt handler for input ring error interrupts. */
+static irqreturn_t octep_ire_intr_handler_cn93_pf(void *dev)
{
struct octep_device *oct = (struct octep_device *)dev;
struct pci_dev *pdev = oct->pdev;
@@ -425,8 +427,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
reg_val);
}
}
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for output ring error interrupts. */
+static irqreturn_t octep_ore_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
+ int i = 0;
/* Check for ORERR INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT);
@@ -444,9 +455,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
reg_val);
}
}
-
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf input ring error interrupts. */
+static irqreturn_t octep_vfire_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for VFIRE INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0));
@@ -454,8 +472,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received VFIRE_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for vf output ring error interrupts. */
+static irqreturn_t octep_vfore_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for VFORE INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0));
@@ -463,19 +489,30 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received VFORE_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
- /* Check for MBOX INTR and OEI INTR */
- if (octep_poll_non_ioq_interrupts_cn93_pf(oct))
- goto irq_handled;
+/* Interrupt handler for dpi dma related interrupts. */
+static irqreturn_t octep_dma_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ u64 reg_val = 0;
/* Check for DMA INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT);
if (reg_val) {
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for dpi dma transaction error interrupts for VFs */
+static irqreturn_t octep_dma_vf_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for DMA VF INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0));
@@ -483,8 +520,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for pp transaction error interrupts for VFs */
+static irqreturn_t octep_pp_vf_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for PPVF INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0));
@@ -492,8 +537,16 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received PP_VF_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupt handler for mac related interrupts. */
+static irqreturn_t octep_misc_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
+ u64 reg_val = 0;
/* Check for MISC INTR */
reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT);
@@ -501,11 +554,17 @@ static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev)
dev_info(&pdev->dev,
"Received MISC_RINT intr: 0x%llx\n", reg_val);
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val);
- goto irq_handled;
}
+ return IRQ_HANDLED;
+}
+
+/* Interrupts handler for all reserved interrupts. */
+static irqreturn_t octep_rsvd_intr_handler_cn93_pf(void *dev)
+{
+ struct octep_device *oct = (struct octep_device *)dev;
+ struct pci_dev *pdev = oct->pdev;
dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
-irq_handled:
return IRQ_HANDLED;
}
@@ -569,8 +628,15 @@ static void octep_enable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);
+
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
}
/* Disable all interrupts */
@@ -588,8 +654,15 @@ static void octep_disable_interrupts_cn93_pf(struct octep_device *oct)
octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);
+
octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
+
+ octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
+ octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
}
/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
@@ -722,7 +795,16 @@ void octep_device_setup_cn93_pf(struct octep_device *oct)
oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf;
oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf;
- oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf;
+ oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cn93_pf;
+ oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cn93_pf;
+ oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cn93_pf;
+ oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cn93_pf;
+ oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cn93_pf;
+ oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cn93_pf;
+ oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cn93_pf;
+ oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cn93_pf;
+ oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cn93_pf;
+ oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cn93_pf;
oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf;
oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf;
oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
index df7cd39d9fce..1622a6ebf036 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h
@@ -49,6 +49,11 @@
/* Default MTU */
#define OCTEP_DEFAULT_MTU 1500
+/* pf heartbeat interval in milliseconds */
+#define OCTEP_DEFAULT_FW_HB_INTERVAL 1000
+/* pf heartbeat miss count */
+#define OCTEP_DEFAULT_FW_HB_MISS_COUNT 20
+
/* Macros to get octeon config params */
#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs)
@@ -181,6 +186,16 @@ struct octep_ctrl_mbox_config {
void __iomem *barmem_addr;
};
+/* Info from firmware */
+struct octep_fw_info {
+ /* interface pkind */
+ u16 pkind;
+ /* heartbeat interval in milliseconds */
+ u16 hb_interval;
+ /* heartbeat miss count */
+ u16 hb_miss_count;
+};
+
/* Data Structure to hold configuration limits and active config */
struct octep_config {
/* Input Queue attributes. */
@@ -201,10 +216,7 @@ struct octep_config {
/* ctrl mbox config */
struct octep_ctrl_mbox_config ctrl_mbox_cfg;
- /* Configured maximum heartbeat miss count */
- u32 max_hb_miss_cnt;
-
- /* Configured firmware heartbeat interval in secs */
- u32 hb_interval;
+ /* fw info */
+ struct octep_fw_info fw_info;
};
#endif /* _OCTEP_CONFIG_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
index 17bfd5cdf462..0594607a2585 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
@@ -26,7 +26,7 @@ static atomic_t ctrl_net_msg_id;
/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */
static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = {
- [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] =
+ [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_GET_INFO] =
OCTEP_CP_VERSION(1, 0, 0)
};
@@ -353,6 +353,28 @@ void octep_ctrl_net_recv_fw_messages(struct octep_device *oct)
}
}
+int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
+ struct octep_fw_info *info)
+{
+ struct octep_ctrl_net_wait_data d = {0};
+ struct octep_ctrl_net_h2f_resp *resp;
+ struct octep_ctrl_net_h2f_req *req;
+ int err;
+
+ req = &d.data.req;
+ init_send_req(&d.msg, req, 0, vfid);
+ req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_INFO;
+ req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET;
+ err = octep_send_mbox_req(oct, &d, true);
+ if (err < 0)
+ return err;
+
+ resp = &d.data.resp;
+ memcpy(info, &resp->info.fw_info, sizeof(struct octep_fw_info));
+
+ return 0;
+}
+
int octep_ctrl_net_uninit(struct octep_device *oct)
{
struct octep_ctrl_net_wait_data *pos, *n;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
index 1c2ef4ee31d9..b330f370131b 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h
@@ -41,6 +41,7 @@ enum octep_ctrl_net_h2f_cmd {
OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS,
OCTEP_CTRL_NET_H2F_CMD_RX_STATE,
OCTEP_CTRL_NET_H2F_CMD_LINK_INFO,
+ OCTEP_CTRL_NET_H2F_CMD_GET_INFO,
OCTEP_CTRL_NET_H2F_CMD_MAX
};
@@ -161,6 +162,11 @@ struct octep_ctrl_net_h2f_resp_cmd_state {
u16 state;
};
+/* get info request */
+struct octep_ctrl_net_h2f_resp_cmd_get_info {
+ struct octep_fw_info fw_info;
+};
+
/* Host to fw response data */
struct octep_ctrl_net_h2f_resp {
union octep_ctrl_net_resp_hdr hdr;
@@ -171,6 +177,7 @@ struct octep_ctrl_net_h2f_resp {
struct octep_ctrl_net_h2f_resp_cmd_state link;
struct octep_ctrl_net_h2f_resp_cmd_state rx;
struct octep_ctrl_net_link_info link_info;
+ struct octep_ctrl_net_h2f_resp_cmd_get_info info;
};
} __packed;
@@ -330,6 +337,17 @@ int octep_ctrl_net_set_link_info(struct octep_device *oct,
*/
void octep_ctrl_net_recv_fw_messages(struct octep_device *oct);
+/** Get info from firmware.
+ *
+ * @param oct: non-null pointer to struct octep_device.
+ * @param vfid: Index of virtual function.
+ * @param info: non-null pointer to struct octep_fw_info.
+ *
+ * return value: 0 on success, -errno on failure.
+ */
+int octep_ctrl_net_get_info(struct octep_device *oct, int vfid,
+ struct octep_fw_info *info);
+
/** Uninitialize data for ctrl net.
*
* @param oct: non-null pointer to struct octep_device.
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 5b46ca47c8e5..552970c7dec0 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -155,18 +155,153 @@ static void octep_disable_msix(struct octep_device *oct)
}
/**
- * octep_non_ioq_intr_handler() - common handler for all generic interrupts.
+ * octep_oei_intr_handler() - common handler for output endpoint interrupts.
*
* @irq: Interrupt number.
* @data: interrupt data.
*
- * this is common handler for all non-queue (generic) interrupts.
+ * this is common handler for all output endpoint interrupts.
+ */
+static irqreturn_t octep_oei_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.oei_intr_handler(oct);
+}
+
+/**
+ * octep_ire_intr_handler() - common handler for input ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for input ring error interrupts.
+ */
+static irqreturn_t octep_ire_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.ire_intr_handler(oct);
+}
+
+/**
+ * octep_ore_intr_handler() - common handler for output ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for output ring error interrupts.
+ */
+static irqreturn_t octep_ore_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.ore_intr_handler(oct);
+}
+
+/**
+ * octep_vfire_intr_handler() - common handler for vf input ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for vf input ring error interrupts.
+ */
+static irqreturn_t octep_vfire_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.vfire_intr_handler(oct);
+}
+
+/**
+ * octep_vfore_intr_handler() - common handler for vf output ring error interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for vf output ring error interrupts.
+ */
+static irqreturn_t octep_vfore_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.vfore_intr_handler(oct);
+}
+
+/**
+ * octep_dma_intr_handler() - common handler for dpi dma related interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for dpi dma related interrupts.
+ */
+static irqreturn_t octep_dma_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.dma_intr_handler(oct);
+}
+
+/**
+ * octep_dma_vf_intr_handler() - common handler for dpi dma transaction error interrupts for VFs.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for dpi dma transaction error interrupts for VFs.
*/
-static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data)
+static irqreturn_t octep_dma_vf_intr_handler(int irq, void *data)
{
struct octep_device *oct = data;
- return oct->hw_ops.non_ioq_intr_handler(oct);
+ return oct->hw_ops.dma_vf_intr_handler(oct);
+}
+
+/**
+ * octep_pp_vf_intr_handler() - common handler for pp transaction error interrupts for VFs.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for pp transaction error interrupts for VFs.
+ */
+static irqreturn_t octep_pp_vf_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.pp_vf_intr_handler(oct);
+}
+
+/**
+ * octep_misc_intr_handler() - common handler for mac related interrupts.
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for mac related interrupts.
+ */
+static irqreturn_t octep_misc_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.misc_intr_handler(oct);
+}
+
+/**
+ * octep_rsvd_intr_handler() - common handler for reserved interrupts (future use).
+ *
+ * @irq: Interrupt number.
+ * @data: interrupt data.
+ *
+ * this is common handler for all reserved interrupts.
+ */
+static irqreturn_t octep_rsvd_intr_handler(int irq, void *data)
+{
+ struct octep_device *oct = data;
+
+ return oct->hw_ops.rsvd_intr_handler(oct);
}
/**
@@ -222,9 +357,57 @@ static int octep_request_irqs(struct octep_device *oct)
snprintf(irq_name, OCTEP_MSIX_NAME_SIZE,
"%s-%s", netdev->name, non_ioq_msix_names[i]);
- ret = request_irq(msix_entry->vector,
- octep_non_ioq_intr_handler, 0,
- irq_name, oct);
+ if (!strncmp(non_ioq_msix_names[i], "epf_oei_rint",
+ strlen("epf_oei_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_oei_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_ire_rint",
+ strlen("epf_ire_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_ire_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_ore_rint",
+ strlen("epf_ore_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_ore_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_vfire_rint",
+ strlen("epf_vfire_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_vfire_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_vfore_rint",
+ strlen("epf_vfore_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_vfore_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_rint",
+ strlen("epf_dma_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_dma_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_dma_vf_rint",
+ strlen("epf_dma_vf_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_dma_vf_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_pp_vf_rint",
+ strlen("epf_pp_vf_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_pp_vf_intr_handler, 0,
+ irq_name, oct);
+ } else if (!strncmp(non_ioq_msix_names[i], "epf_misc_rint",
+ strlen("epf_misc_rint"))) {
+ ret = request_irq(msix_entry->vector,
+ octep_misc_intr_handler, 0,
+ irq_name, oct);
+ } else {
+ ret = request_irq(msix_entry->vector,
+ octep_rsvd_intr_handler, 0,
+ irq_name, oct);
+ }
+
if (ret) {
netdev_err(netdev,
"request_irq failed for %s; err=%d",
@@ -917,9 +1100,9 @@ static void octep_hb_timeout_task(struct work_struct *work)
int miss_cnt;
miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
- if (miss_cnt < oct->conf->max_hb_miss_cnt) {
+ if (miss_cnt < oct->conf->fw_info.hb_miss_count) {
queue_delayed_work(octep_wq, &oct->hb_task,
- msecs_to_jiffies(oct->conf->hb_interval * 1000));
+ msecs_to_jiffies(oct->conf->fw_info.hb_interval));
return;
}
@@ -1012,8 +1195,7 @@ int octep_device_setup(struct octep_device *oct)
atomic_set(&oct->hb_miss_cnt, 0);
INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task);
- queue_delayed_work(octep_wq, &oct->hb_task,
- msecs_to_jiffies(oct->conf->hb_interval * 1000));
+
return 0;
unsupported_dev:
@@ -1142,6 +1324,15 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "Device setup failed\n");
goto err_octep_config;
}
+
+ octep_ctrl_net_get_info(octep_dev, OCTEP_CTRL_NET_INVALID_VFID,
+ &octep_dev->conf->fw_info);
+ dev_info(&octep_dev->pdev->dev, "Heartbeat interval %u msecs Heartbeat miss count %u\n",
+ octep_dev->conf->fw_info.hb_interval,
+ octep_dev->conf->fw_info.hb_miss_count);
+ queue_delayed_work(octep_wq, &octep_dev->hb_task,
+ msecs_to_jiffies(octep_dev->conf->fw_info.hb_interval));
+
INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task);
INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task);
INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task);
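Note on this change (not part of the patch): the heartbeat interval is now reported by firmware in milliseconds (with OCTEP_DEFAULT_FW_HB_INTERVAL at 1000), so the old "seconds * 1000" conversion goes away and the watchdog is armed and re-armed directly with msecs_to_jiffies():

	miss_cnt = atomic_inc_return(&oct->hb_miss_cnt);
	if (miss_cnt < oct->conf->fw_info.hb_miss_count)
		queue_delayed_work(octep_wq, &oct->hb_task,
				   msecs_to_jiffies(oct->conf->fw_info.hb_interval));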
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
index e0907a719133..6df902ebb7f3 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h
@@ -65,7 +65,16 @@ struct octep_hw_ops {
void (*setup_oq_regs)(struct octep_device *oct, int q);
void (*setup_mbox_regs)(struct octep_device *oct, int mbox);
- irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector);
+ irqreturn_t (*oei_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ire_intr_handler)(void *ioq_vector);
+ irqreturn_t (*ore_intr_handler)(void *ioq_vector);
+ irqreturn_t (*vfire_intr_handler)(void *ioq_vector);
+ irqreturn_t (*vfore_intr_handler)(void *ioq_vector);
+ irqreturn_t (*dma_intr_handler)(void *ioq_vector);
+ irqreturn_t (*dma_vf_intr_handler)(void *ioq_vector);
+ irqreturn_t (*pp_vf_intr_handler)(void *ioq_vector);
+ irqreturn_t (*misc_intr_handler)(void *ioq_vector);
+ irqreturn_t (*rsvd_intr_handler)(void *ioq_vector);
irqreturn_t (*ioq_intr_handler)(void *ioq_vector);
int (*soft_reset)(struct octep_device *oct);
void (*reinit_regs)(struct octep_device *oct);
@@ -73,7 +82,7 @@ struct octep_hw_ops {
void (*enable_interrupts)(struct octep_device *oct);
void (*disable_interrupts)(struct octep_device *oct);
- bool (*poll_non_ioq_interrupts)(struct octep_device *oct);
+ void (*poll_non_ioq_interrupts)(struct octep_device *oct);
void (*enable_io_queues)(struct octep_device *oct);
void (*disable_io_queues)(struct octep_device *oct);
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
index b25c3093dc7b..0a43983e9101 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h
@@ -370,4 +370,8 @@
/* bit 1 for firmware heartbeat interrupt */
#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1)
+#define CN93_PEM_BAR4_INDEX 7
+#define CN93_PEM_BAR4_INDEX_SIZE 0x400000ULL
+#define CN93_PEM_BAR4_INDEX_OFFSET (CN93_PEM_BAR4_INDEX * CN93_PEM_BAR4_INDEX_SIZE)
+
#endif /* _OCTEP_REGS_CN9K_PF_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
index 782a24f27f3e..49feae80d7d2 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -20,6 +20,7 @@ struct octep_oq_desc_hw {
dma_addr_t buffer_ptr;
u64 info_ptr;
};
+static_assert(sizeof(struct octep_oq_desc_hw) == 16);
#define OCTEP_OQ_DESC_SIZE (sizeof(struct octep_oq_desc_hw))
@@ -39,6 +40,7 @@ struct octep_oq_resp_hw_ext {
/* checksum verified. */
u64 csum_verified:2;
};
+static_assert(sizeof(struct octep_oq_resp_hw_ext) == 8);
#define OCTEP_OQ_RESP_HW_EXT_SIZE (sizeof(struct octep_oq_resp_hw_ext))
@@ -50,6 +52,7 @@ struct octep_oq_resp_hw {
/* The Length of the packet. */
__be64 length;
};
+static_assert(sizeof(struct octep_oq_resp_hw) == 8);
#define OCTEP_OQ_RESP_HW_SIZE (sizeof(struct octep_oq_resp_hw))
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 21e75ff9f5e7..86c98b13fc44 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -36,6 +36,7 @@ struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
};
+static_assert(sizeof(struct octep_tx_sglist_desc) == 40);
 /* Each Scatter/Gather entry sent to hardware holds four pointers.
* So, number of entries required is (MAX_SKB_FRAGS + 1)/4, where '+1'
@@ -239,6 +240,7 @@ struct octep_instr_hdr {
/* Reserved3 */
u64 reserved3:1;
};
+static_assert(sizeof(struct octep_instr_hdr) == 8);
/* Hardware Tx completion response header */
struct octep_instr_resp_hdr {
@@ -263,6 +265,7 @@ struct octep_instr_resp_hdr {
/* Opcode for the return packet */
u64 opcode:16;
};
+static_assert(sizeof(struct octep_instr_resp_hdr) == 8);
/* 64-byte Tx instruction format.
* Format of instruction for a 64-byte mode input queue.
@@ -293,6 +296,7 @@ struct octep_tx_desc_hw {
/* Additional headers available in a 64-byte instruction. */
u64 exhdr[4];
};
+static_assert(sizeof(struct octep_tx_desc_hw) == 64);
#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
#endif /* _OCTEP_TX_H_ */
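Aside (not part of the patch): these static_assert() lines pin the size of structures shared with hardware, so a stray field or bad bit-field packing fails at compile time instead of silently corrupting DMA. The pattern generalizes to any descriptor, e.g. this hypothetical one:

struct example_hw_desc {
	u64 addr;
	u32 len;
	u32 flags;
};
static_assert(sizeof(struct example_hw_desc) == 16,
	      "hardware expects a 16-byte descriptor");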
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index e06f77ad6106..6c70c8498690 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1218,8 +1218,6 @@ static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
{
- const char *lmac_string;
-
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
@@ -1230,12 +1228,12 @@ static inline void link_status_user_format(u64 lstat,
if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
linfo->lmac_type_id, cgx->cgx_id, lmac_id);
- strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
+ strscpy(linfo->lmac_type, "Unknown", sizeof(linfo->lmac_type));
return;
}
- lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
- strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
+ strscpy(linfo->lmac_type, cgx_lmactype_string[linfo->lmac_type_id],
+ sizeof(linfo->lmac_type));
}
/* Hardware event handlers */
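
The strncpy() -> strscpy() change above avoids the classic pitfall that strncpy() does not NUL-terminate the destination when the source fills it. A small sketch of the preferred pattern (example_copy_name is an invented helper, not from the driver):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>
#include <linux/types.h>

/* strscpy() always NUL-terminates and returns -E2BIG on truncation. */
static void example_copy_name(char *dst, size_t dst_size, const char *src)
{
	if (strscpy(dst, src, dst_size) == -E2BIG)
		pr_warn("name '%s' truncated to %zu bytes\n", src, dst_size - 1);
}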
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 6b5b06c2b4e9..6845556581c3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -1473,6 +1473,12 @@ struct flow_msg {
u8 next_header;
};
__be16 vlan_itci;
+#define OTX2_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define OTX2_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define OTX2_FLOWER_MASK_MPLS_BOS BIT(8)
+#define OTX2_FLOWER_MASK_MPLS_TTL GENMASK(7, 0)
+#define OTX2_FLOWER_MASK_MPLS_NON_TTL GENMASK(31, 8)
+ u32 mpls_lse[4];
};
struct npc_install_flow_req {
@@ -1574,7 +1580,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
- PTP_OP_EXTTS_ON = 4,
+ PTP_OP_PPS_ON = 4,
PTP_OP_ADJTIME = 5,
PTP_OP_SET_CLOCK = 6,
};
@@ -1584,7 +1590,8 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
- int extts_on;
+ u64 period;
+ int pps_on;
s64 delta;
u64 clk;
};
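
The new OTX2_FLOWER_MASK_MPLS_* masks follow the on-the-wire MPLS label stack entry layout: 20-bit label, 3-bit TC, a bottom-of-stack bit and an 8-bit TTL packed into one 32-bit word. A self-contained sketch of packing and unpacking such a word with FIELD_PREP()/FIELD_GET(); only the bit positions are taken from the patch, the ex_* names are invented:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EX_MPLS_LB	GENMASK(31, 12)	/* label */
#define EX_MPLS_TC	GENMASK(11, 9)	/* traffic class */
#define EX_MPLS_BOS	BIT(8)		/* bottom of stack */
#define EX_MPLS_TTL	GENMASK(7, 0)	/* time to live */

static u32 ex_mpls_pack(u32 label, u8 tc, bool bos, u8 ttl)
{
	return FIELD_PREP(EX_MPLS_LB, label) |
	       FIELD_PREP(EX_MPLS_TC, tc) |
	       FIELD_PREP(EX_MPLS_BOS, bos) |
	       FIELD_PREP(EX_MPLS_TTL, ttl);
}

static u8 ex_mpls_ttl(u32 lse)
{
	return FIELD_GET(EX_MPLS_TTL, lse);
}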
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index de9fbd98dfb7..ab3e39eef2eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -206,6 +206,14 @@ enum key_fields {
NPC_SPORT_SCTP,
NPC_DPORT_SCTP,
NPC_IPSEC_SPI,
+ NPC_MPLS1_LBTCBOS,
+ NPC_MPLS1_TTL,
+ NPC_MPLS2_LBTCBOS,
+ NPC_MPLS2_TTL,
+ NPC_MPLS3_LBTCBOS,
+ NPC_MPLS3_TTL,
+ NPC_MPLS4_LBTCBOS,
+ NPC_MPLS4_TTL,
NPC_HEADER_FIELDS_MAX,
NPC_CHAN = NPC_HEADER_FIELDS_MAX, /* Valid when Rx */
NPC_PF_FUNC, /* Valid when Tx */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index ffbd22797163..bcc96eed2481 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -46,6 +46,7 @@
#define PTP_PPS_HI_INCR 0xF60ULL
#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_LO 0xF50ULL
#define PTP_PPS_THRESH_HI 0xF58ULL
#define PTP_CLOCK_LO 0xF08ULL
@@ -411,29 +412,12 @@ void ptp_start(struct rvu *rvu, u64 sclk, u32 ext_clk_freq, u32 extts)
}
clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
clock_cfg &= ~PTP_CLOCK_CFG_ATOMIC_OP_MASK;
clock_cfg |= (ATOMIC_SET << 26);
writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
- /* Set 50% duty cycle for 1Hz output */
- writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
- writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
- if (cn10k_ptp_errata(ptp)) {
- /* The ptp_clock_hi rollsover to zero once clock cycle before it
- * reaches one second boundary. so, program the pps_lo_incr in
- * such a way that the pps threshold value comparison at one
- * second boundary will succeed and pps edge changes. After each
- * one second boundary, the hrtimer handler will be invoked and
- * reprograms the pps threshold value.
- */
- ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
- writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
- ptp->reg_base + PTP_PPS_LO_INCR);
- }
-
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
else
@@ -465,20 +449,68 @@ static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
return 0;
}
-static int ptp_extts_on(struct ptp *ptp, int on)
+static int ptp_config_hrtimer(struct ptp *ptp, int on)
{
u64 ptp_clock_hi;
- if (cn10k_ptp_errata(ptp)) {
- if (on) {
- ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
- ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
- } else {
- if (hrtimer_active(&ptp->hrtimer))
- hrtimer_cancel(&ptp->hrtimer);
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+
+ return 0;
+}
+
+static int ptp_pps_on(struct ptp *ptp, int on, u64 period)
+{
+ u64 clock_cfg;
+
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ if (on) {
+ if (cn10k_ptp_errata(ptp) && period != NSEC_PER_SEC) {
+ dev_err(&ptp->pdev->dev, "Supports max period value as 1 second\n");
+ return -EINVAL;
}
+
+ if (period > (8 * NSEC_PER_SEC)) {
+ dev_err(&ptp->pdev->dev, "Supports max period as 8 seconds\n");
+ return -EINVAL;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ writeq(0, ptp->reg_base + PTP_PPS_THRESH_HI);
+ writeq(0, ptp->reg_base + PTP_PPS_THRESH_LO);
+
+ /* Configure high/low phase time */
+ period = period / 2;
+ writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(((u64)period << 32), ptp->reg_base + PTP_PPS_LO_INCR);
+ } else {
+ clock_cfg &= ~(PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV);
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
}
+ if (on && cn10k_ptp_errata(ptp)) {
+ /* The ptp_clock_hi rolls over to zero one clock cycle before it
+ * reaches the one second boundary. So, program the pps_lo_incr in
+ * such a way that the pps threshold value comparison at one
+ * second boundary will succeed and pps edge changes. After each
+ * one second boundary, the hrtimer handler will be invoked and
+ * reprograms the pps threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
+
+ if (cn10k_ptp_errata(ptp))
+ ptp_config_hrtimer(ptp, on);
+
return 0;
}
@@ -613,8 +645,8 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
- case PTP_OP_EXTTS_ON:
- err = ptp_extts_on(rvu->ptp, req->extts_on);
+ case PTP_OP_PPS_ON:
+ err = ptp_pps_on(rvu->ptp, req->pps_on, req->period);
break;
case PTP_OP_ADJTIME:
ptp_atomic_adjtime(rvu->ptp, req->delta);
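
ptp_pps_on() above programs the requested period by writing half of it, as nanoseconds, into the top 32 bits of PTP_PPS_HI_INCR and PTP_PPS_LO_INCR, giving a 50% duty cycle (the old hard-coded 0x1dcd6500 is just 500000000 ns, half of one second). A standalone arithmetic sketch under that reading of the registers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t period_ns = 2 * NSEC_PER_SEC;	/* e.g. a 2 second output period */
	uint64_t half_ns = period_ns / 2;	/* high phase == low phase */
	uint64_t reg_val = half_ns << 32;	/* value for the HI/LO_INCR registers */

	printf("half phase %llu ns -> register 0x%016llx\n",
	       (unsigned long long)half_ns, (unsigned long long)reg_val);
	return 0;
}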
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f2b1edf1bb43..15a319684ed3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -756,12 +756,11 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
return 0;
- /* This msg is expected only from PFs that are mapped to CGX LMACs,
+ /* This msg is expected only from PF/VFs that are mapped to CGX/RPM LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
- if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
- !is_pf_cgxmapped(rvu, pf))
- return -ENODEV;
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d30e84803481..bd817ee88735 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -2756,6 +2756,27 @@ static int rvu_dbg_npc_rx_miss_stats_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(npc_rx_miss_act, npc_rx_miss_stats_display, NULL);
+#define RVU_DBG_PRINT_MPLS_TTL(pkt, mask) \
+do { \
+ seq_printf(s, "%ld ", FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, pkt)); \
+ seq_printf(s, "mask 0x%lx\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL, mask)); \
+} while (0)
+
+#define RVU_DBG_PRINT_MPLS_LBTCBOS(_pkt, _mask) \
+do { \
+ typeof(_pkt) (pkt) = (_pkt); \
+ typeof(_mask) (mask) = (_mask); \
+ seq_printf(s, "%ld %ld %ld\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, pkt), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, pkt), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, pkt)); \
+ seq_printf(s, "\tmask 0x%lx 0x%lx 0x%lx\n", \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_LB, mask), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TC, mask), \
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_BOS, mask)); \
+} while (0)
+
static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
struct rvu_npc_mcam_rule *rule)
{
@@ -2836,6 +2857,38 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "0x%x ", ntohl(rule->packet.spi));
seq_printf(s, "mask 0x%x\n", ntohl(rule->mask.spi));
break;
+ case NPC_MPLS1_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[0],
+ rule->mask.mpls_lse[0]);
+ break;
+ case NPC_MPLS1_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[0],
+ rule->mask.mpls_lse[0]);
+ break;
+ case NPC_MPLS2_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[1],
+ rule->mask.mpls_lse[1]);
+ break;
+ case NPC_MPLS2_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[1],
+ rule->mask.mpls_lse[1]);
+ break;
+ case NPC_MPLS3_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[2],
+ rule->mask.mpls_lse[2]);
+ break;
+ case NPC_MPLS3_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[2],
+ rule->mask.mpls_lse[2]);
+ break;
+ case NPC_MPLS4_LBTCBOS:
+ RVU_DBG_PRINT_MPLS_LBTCBOS(rule->packet.mpls_lse[3],
+ rule->mask.mpls_lse[3]);
+ break;
+ case NPC_MPLS4_TTL:
+ RVU_DBG_PRINT_MPLS_TTL(rule->packet.mpls_lse[3],
+ rule->mask.mpls_lse[3]);
+ break;
default:
seq_puts(s, "\n");
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 41df5ac23f92..c70932625d0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -14,26 +14,16 @@
#define DRV_NAME "octeontx2-af"
-static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_pair_nest_start(fmsg, name);
+ devlink_fmsg_obj_nest_start(fmsg);
}
-static int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
static bool rvu_common_request_irq(struct rvu *rvu, int offset,
@@ -284,175 +274,81 @@ static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
{
struct rvu_nix_event_ctx *nix_event_context;
u64 intr_val;
- int err;
nix_event_context = ctx;
switch (health_reporter) {
case NIX_AF_RVU_INTR:
intr_val = nix_event_context->nix_af_rvu_int;
- err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
- nix_event_context->nix_af_rvu_int);
- if (err)
- return err;
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_GEN:
intr_val = nix_event_context->nix_af_rvu_gen;
- err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
- nix_event_context->nix_af_rvu_gen);
- if (err)
- return err;
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(1)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(4)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (intr_val & BIT_ULL(1))
+ devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (intr_val & BIT_ULL(4))
+ devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_ERR:
intr_val = nix_event_context->nix_af_rvu_err;
- err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
- nix_event_context->nix_af_rvu_err);
- if (err)
- return err;
- if (intr_val & BIT_ULL(14)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(13)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(12)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(6)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(5)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(4)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(3)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(2)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(1)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (intr_val & BIT_ULL(14))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (intr_val & BIT_ULL(13))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (intr_val & BIT_ULL(12))
+ devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (intr_val & BIT_ULL(6))
+ devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (intr_val & BIT_ULL(5))
+ devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (intr_val & BIT_ULL(4))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (intr_val & BIT_ULL(3))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (intr_val & BIT_ULL(2))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (intr_val & BIT_ULL(1))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ rvu_report_pair_end(fmsg);
break;
case NIX_AF_RVU_RAS:
intr_val = nix_event_context->nix_af_rvu_err;
- err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
- nix_event_context->nix_af_rvu_err);
- if (err)
- return err;
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
- if (err)
- return err;
- if (intr_val & BIT_ULL(34)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(33)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(32)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(4)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(3)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
-
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(2)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(1)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
- if (err)
- return err;
- }
- if (intr_val & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (intr_val & BIT_ULL(34))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (intr_val & BIT_ULL(33))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (intr_val & BIT_ULL(32))
+ devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (intr_val & BIT_ULL(4))
+ devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (intr_val & BIT_ULL(3))
+ devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+ if (intr_val & BIT_ULL(2))
+ devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (intr_val & BIT_ULL(1))
+ devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (intr_val & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ rvu_report_pair_end(fmsg);
break;
default:
return -EINVAL;
@@ -922,181 +818,87 @@ static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
struct rvu_npa_event_ctx *npa_event_context;
unsigned int alloc_dis, free_dis;
u64 intr_val;
- int err;
npa_event_context = ctx;
switch (health_reporter) {
case NPA_AF_RVU_GEN:
intr_val = npa_event_context->npa_af_rvu_gen;
- err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
- npa_event_context->npa_af_rvu_gen);
- if (err)
- return err;
- if (intr_val & BIT_ULL(32)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
- if (err)
- return err;
- }
+ rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
+ npa_event_context->npa_af_rvu_gen);
+ if (intr_val & BIT_ULL(32))
+ devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");
free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
- if (free_dis & BIT(NPA_INPQ_NIX0_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_NIX0_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_NIX1_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_NIX1_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_SSO)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_TIM)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_DPI)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
- if (err)
- return err;
- }
- if (free_dis & BIT(NPA_INPQ_AURA_OP)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
- if (err)
- return err;
- }
+ if (free_dis & BIT(NPA_INPQ_NIX0_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
+ if (free_dis & BIT(NPA_INPQ_NIX0_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
+ if (free_dis & BIT(NPA_INPQ_NIX1_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
+ if (free_dis & BIT(NPA_INPQ_NIX1_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
+ if (free_dis & BIT(NPA_INPQ_SSO))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
+ if (free_dis & BIT(NPA_INPQ_TIM))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
+ if (free_dis & BIT(NPA_INPQ_DPI))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
+ if (free_dis & BIT(NPA_INPQ_AURA_OP))
+ devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");
alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
- if (alloc_dis & BIT(NPA_INPQ_NIX0_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_NIX0_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_NIX1_RX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_NIX1_TX)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_SSO)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_TIM)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_DPI)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
- if (err)
- return err;
- }
- if (alloc_dis & BIT(NPA_INPQ_AURA_OP)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
+ if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
+ if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
+ devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
+ if (alloc_dis & BIT(NPA_INPQ_SSO))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
+ if (alloc_dis & BIT(NPA_INPQ_TIM))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
+ if (alloc_dis & BIT(NPA_INPQ_DPI))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
+ if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
+ devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");
+
+ rvu_report_pair_end(fmsg);
break;
case NPA_AF_RVU_ERR:
- err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
- npa_event_context->npa_af_rvu_err);
- if (err)
- return err;
-
- if (npa_event_context->npa_af_rvu_err & BIT_ULL(14)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_err & BIT_ULL(13)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_err & BIT_ULL(12)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
+ npa_event_context->npa_af_rvu_err);
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
+ devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
+ if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
+ devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ rvu_report_pair_end(fmsg);
break;
case NPA_AF_RVU_RAS:
- err = rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
- npa_event_context->npa_af_rvu_ras);
- if (err)
- return err;
- if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
- if (err)
- return err;
- }
- if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
- if (err)
- return err;
- }
- err = rvu_report_pair_end(fmsg);
- if (err)
- return err;
+ rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
+ npa_event_context->npa_af_rvu_ras);
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
+ devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
+ devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
+ if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
+ devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
+ rvu_report_pair_end(fmsg);
break;
case NPA_AF_RVU_INTR:
- err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
- if (err)
- return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
- npa_event_context->npa_af_rvu_int);
- if (err)
- return err;
- if (npa_event_context->npa_af_rvu_int & BIT_ULL(0)) {
- err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
- if (err)
- return err;
- }
- return rvu_report_pair_end(fmsg);
+ rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
+ npa_event_context->npa_af_rvu_int);
+ if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
+ devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ rvu_report_pair_end(fmsg);
+ break;
default:
return -EINVAL;
}
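
The conversion above drops every per-call error check because, as of this cycle, the devlink_fmsg_*() helpers return void and latch formatting errors inside the fmsg, to be reported once by the devlink core. A hedged sketch of a reporter dump callback written against that API (the ex_* names and the meaning of bit 0 are invented):

#include <linux/bits.h>
#include <net/devlink.h>

static int ex_reporter_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	u64 intr = 0x1;		/* pretend this was latched by an IRQ handler */

	devlink_fmsg_pair_nest_start(fmsg, "EXAMPLE_BLOCK");
	devlink_fmsg_obj_nest_start(fmsg);
	devlink_fmsg_u64_pair_put(fmsg, "interrupt reg", intr);
	if (intr & BIT_ULL(0))
		devlink_fmsg_string_put(fmsg, "unmap slot error");
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);

	return 0;	/* any buffered fmsg error is surfaced by devlink itself */
}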
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 237f82082ebe..114e4ec21802 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -43,6 +43,14 @@ static const char * const npc_flow_names[] = {
[NPC_DPORT_SCTP] = "sctp destination port",
[NPC_LXMB] = "Mcast/Bcast header ",
[NPC_IPSEC_SPI] = "SPI ",
+ [NPC_MPLS1_LBTCBOS] = "lse depth 1 label tc bos",
+ [NPC_MPLS1_TTL] = "lse depth 1 ttl",
+ [NPC_MPLS2_LBTCBOS] = "lse depth 2 label tc bos",
+ [NPC_MPLS2_TTL] = "lse depth 2 ttl",
+ [NPC_MPLS3_LBTCBOS] = "lse depth 3 label tc bos",
+ [NPC_MPLS3_TTL] = "lse depth 3 ttl",
+ [NPC_MPLS4_LBTCBOS] = "lse depth 4 label tc bos",
+ [NPC_MPLS4_TTL] = "lse depth 4 ttl",
[NPC_UNKNOWN] = "unknown",
};
@@ -528,6 +536,14 @@ do { \
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LD, NPC_LT_LD_AH, 4, 4);
NPC_SCAN_HDR(NPC_IPSEC_SPI, NPC_LID_LE, NPC_LT_LE_ESP, 0, 4);
+ NPC_SCAN_HDR(NPC_MPLS1_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 0, 3);
+ NPC_SCAN_HDR(NPC_MPLS1_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 3, 1);
+ NPC_SCAN_HDR(NPC_MPLS2_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 4, 3);
+ NPC_SCAN_HDR(NPC_MPLS2_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 7, 1);
+ NPC_SCAN_HDR(NPC_MPLS3_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 8, 3);
+ NPC_SCAN_HDR(NPC_MPLS3_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 11, 1);
+ NPC_SCAN_HDR(NPC_MPLS4_LBTCBOS, NPC_LID_LC, NPC_LT_LC_MPLS, 12, 3);
+ NPC_SCAN_HDR(NPC_MPLS4_TTL, NPC_LID_LC, NPC_LT_LC_MPLS, 15, 1);
/* SMAC follows the DMAC(which is 6 bytes) */
NPC_SCAN_HDR(NPC_SMAC, NPC_LID_LA, la_ltype, la_start + 6, 6);
@@ -593,6 +609,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
/* for L2M/L2B/L3M/L3B, check if the type is present in the key */
if (npc_check_field(rvu, blkaddr, NPC_LXMB, intf))
*features |= BIT_ULL(NPC_LXMB);
+
+ for (hdr = NPC_MPLS1_LBTCBOS; hdr <= NPC_MPLS4_TTL; hdr++) {
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
+ *features |= BIT_ULL(hdr);
+ }
}
/* Scan key extraction profile and record how fields of our interest
@@ -959,6 +980,47 @@ do { \
NPC_WRITE_FLOW(NPC_INNER_VID, vlan_itci, ntohs(pkt->vlan_itci), 0,
ntohs(mask->vlan_itci), 0);
+ NPC_WRITE_FLOW(NPC_MPLS1_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[0]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[0]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS1_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[0]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[0]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS2_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[1]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[1]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS2_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[1]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[1]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS3_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[2]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[2]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS3_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[2]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[2]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS4_LBTCBOS, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ pkt->mpls_lse[3]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_NON_TTL,
+ mask->mpls_lse[3]), 0);
+ NPC_WRITE_FLOW(NPC_MPLS4_TTL, mpls_lse,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ pkt->mpls_lse[3]), 0,
+ FIELD_GET(OTX2_FLOWER_MASK_MPLS_TTL,
+ mask->mpls_lse[3]), 0);
+
NPC_WRITE_FLOW(NPC_IPFRAG_IPV6, next_header, pkt->next_header, 0,
mask->next_header, 0);
npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 818ce76185b2..1a42bfded872 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1404,7 +1404,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
}
pp_params.order = get_order(buf_size);
- pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
+ pp_params.flags = PP_FLAG_DMA_MAP;
pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
pp_params.nid = NUMA_NO_NODE;
pp_params.dev = pfvf->dev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index 3a72b0793d4a..63130ba37e9d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -175,7 +175,7 @@ static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
-static int ptp_extts_on(struct otx2_ptp *ptp, int on)
+static int ptp_pps_on(struct otx2_ptp *ptp, int on, u64 period)
{
struct ptp_req *req;
@@ -186,8 +186,9 @@ static int ptp_extts_on(struct otx2_ptp *ptp, int on)
if (!req)
return -ENOMEM;
- req->op = PTP_OP_EXTTS_ON;
- req->extts_on = on;
+ req->op = PTP_OP_PPS_ON;
+ req->pps_on = on;
+ req->period = period;
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
@@ -276,8 +277,8 @@ static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
switch (func) {
case PTP_PF_NONE:
case PTP_PF_EXTTS:
- break;
case PTP_PF_PEROUT:
+ break;
case PTP_PF_PHYSYNC:
return -1;
}
@@ -340,6 +341,7 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
ptp_info);
+ u64 period = 0;
int pin;
if (!ptp->nic)
@@ -351,12 +353,24 @@ static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
rq->extts.index);
if (pin < 0)
return -EBUSY;
- if (on) {
- ptp_extts_on(ptp, on);
+ if (on)
schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
- } else {
- ptp_extts_on(ptp, on);
+ else
cancel_delayed_work_sync(&ptp->extts_work);
+
+ return 0;
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= ptp_info->n_pins)
+ return -EINVAL;
+ if (on) {
+ period = rq->perout.period.sec * NSEC_PER_SEC +
+ rq->perout.period.nsec;
+ ptp_pps_on(ptp, on, period);
+ } else {
+ ptp_pps_on(ptp, on, period);
}
return 0;
default:
@@ -411,6 +425,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
.n_ext_ts = 1,
+ .n_per_out = 1,
.n_pins = 1,
.pps = 0,
.pin_config = &ptp_ptr->extts_config,
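
The new PTP_CLK_REQ_PEROUT branch above reduces to converting the requested period into nanoseconds and bounding it before it is sent to the AF over the mailbox. A minimal sketch of that conversion, assuming the standard ptp_clock_request layout; the 8 second ceiling mirrors the AF-side check and ex_perout_to_ns is an invented name:

#include <linux/errno.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/time64.h>

static int ex_perout_to_ns(const struct ptp_clock_request *rq, u64 *period_ns)
{
	/* No phase, duty-cycle or one-shot flags are handled here. */
	if (rq->perout.flags)
		return -EOPNOTSUPP;

	*period_ns = rq->perout.period.sec * NSEC_PER_SEC +
		     rq->perout.period.nsec;
	if (!*period_ns || *period_ns > 8ULL * NSEC_PER_SEC)
		return -EINVAL;

	return 0;
}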
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index fab9d85bfb37..8a5e3987a482 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -27,6 +27,8 @@
#define CN10K_TLX_BURST_MANTISSA GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT GENMASK_ULL(47, 44)
+#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
+
struct otx2_tc_flow_stats {
u64 bytes;
u64 pkts;
@@ -519,6 +521,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_IPSEC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
dissector->used_keys);
@@ -738,6 +741,61 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
}
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match;
+ u8 bit;
+
+ flow_rule_match_mpls(rule, &match);
+
+ if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported LSE depth for MPLS match offload");
+ return -EOPNOTSUPP;
+ }
+
+ for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
+ FLOW_DIS_MPLS_MAX) {
+ /* check if any of the fields LABEL,TC,BOS are set */
+ if (*((u32 *)&match.mask->ls[bit]) &
+ OTX2_FLOWER_MASK_MPLS_NON_TTL) {
+ /* Hardware will capture 4 byte MPLS header into
+ * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
+ * Derive the associated NPC key based on header
+ * index and offset.
+ */
+
+ req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
+ 2 * bit);
+ flow_spec->mpls_lse[bit] =
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
+ match.key->ls[bit].mpls_label) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
+ match.key->ls[bit].mpls_tc) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
+ match.key->ls[bit].mpls_bos);
+
+ flow_mask->mpls_lse[bit] =
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
+ match.mask->ls[bit].mpls_label) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
+ match.mask->ls[bit].mpls_tc) |
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
+ match.mask->ls[bit].mpls_bos);
+ }
+
+ if (match.mask->ls[bit].mpls_ttl) {
+ req->features |= BIT_ULL(NPC_MPLS1_TTL +
+ 2 * bit);
+ flow_spec->mpls_lse[bit] |=
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
+ match.key->ls[bit].mpls_ttl);
+ flow_mask->mpls_lse[bit] |=
+ FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
+ match.mask->ls[bit].mpls_ttl);
+ }
+ }
+ }
+
return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
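
The flower parsing above relies on the NPC_MPLSx_* keys being laid out as consecutive (LBTCBOS, TTL) pairs, one pair per label stack depth, so the key for depth i is NPC_MPLS1_LBTCBOS + 2 * i. A small illustration of that index arithmetic; only the ordering is taken from the patch, the ex_* names are invented:

#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/types.h>

enum ex_npc_mpls_key {		/* mirrors the ordering added to enum key_fields */
	EX_MPLS1_LBTCBOS,
	EX_MPLS1_TTL,
	EX_MPLS2_LBTCBOS,
	EX_MPLS2_TTL,
	EX_MPLS3_LBTCBOS,
	EX_MPLS3_TTL,
	EX_MPLS4_LBTCBOS,
	EX_MPLS4_TTL,
};

/* Map a bitmap of matched label stack entries (depth 0..3) to the pair of
 * feature bits the hardware key needs for each depth.
 */
static u64 ex_mpls_feature_bits(unsigned long used_lses)
{
	unsigned int depth;
	u64 features = 0;

	for_each_set_bit(depth, &used_lses, 4)
		features |= BIT_ULL(EX_MPLS1_LBTCBOS + 2 * depth) |
			    BIT_ULL(EX_MPLS1_TTL + 2 * depth);

	return features;
}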
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d5691b6a2bc5..dd6ca2e4fd51 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1528,7 +1528,7 @@ err_clk:
return err;
}
-static int pxa168_eth_remove(struct platform_device *pdev)
+static void pxa168_eth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct pxa168_eth_private *pep = netdev_priv(dev);
@@ -1547,7 +1547,6 @@ static int pxa168_eth_remove(struct platform_device *pdev)
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
free_netdev(dev);
- return 0;
}
static void pxa168_eth_shutdown(struct platform_device *pdev)
@@ -1580,7 +1579,7 @@ MODULE_DEVICE_TABLE(of, pxa168_eth_of_match);
static struct platform_driver pxa168_eth_driver = {
.probe = pxa168_eth_probe,
- .remove = pxa168_eth_remove,
+ .remove_new = pxa168_eth_remove,
.shutdown = pxa168_eth_shutdown,
.resume = pxa168_eth_resume,
.suspend = pxa168_eth_suspend,
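
The pxa168 change here (and the mtk_eth_soc one below) follows the platform-driver .remove_new conversion: the remove callback returns void because the driver core ignores the old int return value, so cleanup must simply always run. A bare-bones sketch with invented names, probe and private state omitted:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

static void ex_eth_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	/* nothing to return: a failure here could not be reported anyway */
}

static struct platform_driver ex_eth_driver = {
	/* .probe omitted for brevity */
	.remove_new = ex_eth_remove,
	.driver = {
		.name = "ex-eth",
	},
};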
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 20afe79f380a..3cf6589cfdac 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -197,6 +197,7 @@ static const struct mtk_reg_map mt7988_reg_map = {
.wdma_base = {
[0] = 0x4800,
[1] = 0x4c00,
+ [2] = 0x5000,
},
.pse_iq_sta = 0x0180,
.pse_oq_sta = 0x01a0,
@@ -2210,7 +2211,7 @@ rx_done:
net_dim(&eth->rx_dim, dim_sample);
if (xdp_flush)
- xdp_do_flush_map();
+ xdp_do_flush();
return done;
}
@@ -3328,7 +3329,7 @@ static int mtk_device_event(struct notifier_block *n, unsigned long event, void
return NOTIFY_DONE;
found:
- if (!dsa_slave_dev_check(dev))
+ if (!dsa_user_dev_check(dev))
return NOTIFY_DONE;
if (__ethtool_get_link_ksettings(dev, &s))
@@ -5002,7 +5003,7 @@ err_destroy_sgmii:
return err;
}
-static int mtk_remove(struct platform_device *pdev)
+static void mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
struct mtk_mac *mac;
@@ -5024,8 +5025,6 @@ static int mtk_remove(struct platform_device *pdev)
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
mtk_mdio_cleanup(eth);
-
- return 0;
}
static const struct mtk_soc_data mt2701_data = {
@@ -5226,7 +5225,7 @@ MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
- .remove = mtk_remove,
+ .remove_new = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
.of_match_table = of_mtk_match,
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 403219d987ef..9ae3b8a71d0e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1132,7 +1132,7 @@ struct mtk_reg_map {
u32 gdm1_cnt;
u32 gdma_to_ppe;
u32 ppe_base;
- u32 wdma_base[2];
+ u32 wdma_base[3];
u32 pse_iq_sta;
u32 pse_oq_sta;
};
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 86f32f486043..b2a5d9c3733d 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -425,7 +425,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
}
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
- int wdma_idx, int txq, int bss, int wcid)
+ int wdma_idx, int txq, int bss, int wcid,
+ bool amsdu_en)
{
struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
@@ -437,6 +438,7 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
MTK_FOE_IB2_WDMA_WINFO_V2;
l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
+ l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
break;
case 2:
*ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index e3d0ec72bc69..691806bca372 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -88,13 +88,13 @@ enum {
#define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
#define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
-#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
-#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
-#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
-#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
-#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
-#define MTK_FOE_WINFO_PAO_HF BIT(23)
-#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
+#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0)
+#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16)
+#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20)
+#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21)
+#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22)
+#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
+#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
enum {
MTK_FOE_STATE_INVALID,
@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
/* netsys_v3 */
u32 w3info;
- u32 wpao;
+ u32 amsdu;
};
/* software-only entry type */
@@ -392,7 +392,8 @@ int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
int sid);
int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
- int wdma_idx, int txq, int bss, int wcid);
+ int wdma_idx, int txq, int bss, int wcid,
+ bool amsdu_en);
int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
unsigned int queue);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index a4efbeb16208..fbb5e9d5af13 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
info->queue = path->mtk_wdma.queue;
info->bss = path->mtk_wdma.bss;
info->wcid = path->mtk_wdma.wcid;
+ info->amsdu = path->mtk_wdma.amsdu;
return 0;
}
@@ -174,7 +175,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
return -ENODEV;
- *dev = dsa_port_to_master(dp);
+ *dev = dsa_port_to_conduit(dp);
return dp->index;
#else
@@ -192,14 +193,17 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
- info.bss, info.wcid);
+ info.bss, info.wcid, info.amsdu);
if (mtk_is_netsys_v2_or_greater(eth)) {
switch (info.wdma_idx) {
case 0:
- pse_port = 8;
+ pse_port = PSE_WDMA0_PORT;
break;
case 1:
- pse_port = 9;
+ pse_port = PSE_WDMA1_PORT;
+ break;
+ case 2:
+ pse_port = PSE_WDMA2_PORT;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 94376aa2b34c..9a6744c0d458 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -17,17 +17,21 @@
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include "mtk_eth_soc.h"
-#include "mtk_wed_regs.h"
#include "mtk_wed.h"
#include "mtk_ppe.h"
#include "mtk_wed_wo.h"
#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
-#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_PKT_SIZE 1920
#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_PAGE_BUF_SIZE 128
#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
#define MTK_WED_RX_RING_SIZE 1536
+#define MTK_WED_RX_PG_BM_CNT 8192
+#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
+#define MTK_WED_AMSDU_NPAGES 32
#define MTK_WED_TX_RING_SIZE 2048
#define MTK_WED_WDMA_RING_SIZE 1024
@@ -41,7 +45,10 @@
#define MTK_WED_RRO_QUE_CNT 8192
#define MTK_WED_MIOD_ENTRY_CNT 128
-static struct mtk_wed_hw *hw_list[2];
+#define MTK_WED_TX_BM_DMA_SIZE 65536
+#define MTK_WED_TX_BM_PKT_CNT 32768
+
+static struct mtk_wed_hw *hw_list[3];
static DEFINE_MUTEX(hw_lock);
struct mtk_wed_flow_block_priv {
@@ -49,6 +56,39 @@ struct mtk_wed_flow_block_priv {
struct net_device *dev;
};
+static const struct mtk_wed_soc_data mt7622_data = {
+ .regmap = {
+ .tx_bm_tkid = 0x088,
+ .wpdma_rx_ring0 = 0x770,
+ .reset_idx_tx_mask = GENMASK(3, 0),
+ .reset_idx_rx_mask = GENMASK(17, 16),
+ },
+ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
+};
+
+static const struct mtk_wed_soc_data mt7986_data = {
+ .regmap = {
+ .tx_bm_tkid = 0x0c8,
+ .wpdma_rx_ring0 = 0x770,
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
+ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+};
+
+static const struct mtk_wed_soc_data mt7988_data = {
+ .regmap = {
+ .tx_bm_tkid = 0x0c8,
+ .wpdma_rx_ring0 = 0x7d0,
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
+ .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+};
+
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
@@ -109,6 +149,90 @@ mtk_wdma_read_reset(struct mtk_wed_device *dev)
return wdma_r32(dev, MTK_WDMA_GLO_CFG);
}
+static void
+mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
+{
+ u32 status;
+
+ if (!mtk_wed_is_v3_or_greater(dev->hw))
+ return;
+
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ /* prefetch FIFO */
+ wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
+
+ /* core FIFO */
+ wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
+ wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
+ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
+
+ /* writeback FIFO */
+ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
+
+ /* prefetch ring status */
+ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
+
+ /* writeback ring status */
+ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
+}
+
static int
mtk_wdma_rx_reset(struct mtk_wed_device *dev)
{
@@ -121,6 +245,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
if (ret)
dev_err(dev->hw->dev, "rx reset failed\n");
+ mtk_wdma_v3_rx_reset(dev);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -135,6 +260,101 @@ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
return ret;
}
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return !!(wed_r32(dev, reg) & mask);
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ int sleep = 15000;
+ int timeout = 100 * sleep;
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev, reg, mask);
+}
+
+static void
+mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
+{
+ u32 status;
+
+ if (!mtk_wed_is_v3_or_greater(dev->hw))
+ return;
+
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ if (read_poll_timeout(wdma_r32, status,
+ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
+ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ /* prefetch FIFO */
+ wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
+ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
+
+ /* core FIFO */
+ wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
+ wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
+ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
+
+ /* writeback FIFO */
+ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
+ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
+
+ /* prefetch ring status */
+ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
+ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
+
+ /* writeback ring status */
+ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
+ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
+ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
+}
+
static void
mtk_wdma_tx_reset(struct mtk_wed_device *dev)
{
@@ -146,6 +366,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
!(status & mask), 0, 10000))
dev_err(dev->hw->dev, "tx reset failed\n");
+ mtk_wdma_v3_tx_reset(dev);
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -278,7 +499,7 @@ mtk_wed_assign(struct mtk_wed_device *dev)
if (!hw->wed_dev)
goto out;
- if (hw->version == 1)
+ if (mtk_wed_is_v1(hw))
return NULL;
/* MT7986 WED devices do not have any pcie slot restrictions */
@@ -298,35 +519,152 @@ out:
}
static int
+mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw = dev->hw;
+ struct mtk_wed_amsdu *wed_amsdu;
+ int i;
+
+ if (!mtk_wed_is_v3_or_greater(hw))
+ return 0;
+
+ wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
+ sizeof(*wed_amsdu), GFP_KERNEL);
+ if (!wed_amsdu)
+ return -ENOMEM;
+
+ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
+ void *ptr;
+
+ /* each segment is 64K */
+ ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_COMP |
+ GFP_DMA32,
+ get_order(MTK_WED_AMSDU_BUF_SIZE));
+ if (!ptr)
+ goto error;
+
+ wed_amsdu[i].txd = ptr;
+ wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
+ MTK_WED_AMSDU_BUF_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
+ goto error;
+ }
+ dev->hw->wed_amsdu = wed_amsdu;
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--)
+ dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
+ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static void
+mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
+ int i;
+
+ if (!wed_amsdu)
+ return;
+
+ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
+ dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
+ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
+ free_pages((unsigned long)wed_amsdu[i].txd,
+ get_order(MTK_WED_AMSDU_BUF_SIZE));
+ }
+}
+
+static int
+mtk_wed_amsdu_init(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
+ int i, ret;
+
+ if (!wed_amsdu)
+ return 0;
+
+ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
+ wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
+ wed_amsdu[i].txd_phy);
+
+ /* init all sta parameter */
+ wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
+ MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
+ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
+ dev->wlan.amsdu_max_len >> 8) |
+ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
+ dev->wlan.amsdu_max_subframes));
+
+ wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
+
+ ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
+ MTK_WED_AMSDU_STA_INFO_DO_INIT);
+ if (ret) {
+ dev_err(dev->hw->dev, "amsdu initialization failed\n");
+ return ret;
+ }
+
+ /* init partial amsdu offload txd src */
+ wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
+ FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
+
+ /* init qmem */
+ wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
+ if (ret) {
+ pr_info("%s: amsdu qmem initialization failed\n", __func__);
+ return ret;
+ }
+
+ /* eagle E1 PCIE1 tx ring 22 flow control issue */
+ if (dev->wlan.id == 0x7991)
+ wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
+
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+
+ return 0;
+}
+
+static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
- struct mtk_wdma_desc *desc;
- dma_addr_t desc_phys;
- void **page_list;
+ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
+ int i, page_idx = 0, n_pages, ring_size;
int token = dev->wlan.token_start;
- int ring_size;
- int n_pages;
- int i, page_idx;
+ struct mtk_wed_buf *page_list;
+ dma_addr_t desc_phys;
+ void *desc_ptr;
- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
- n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ dev->tx_buf_ring.size = ring_size;
+ } else {
+ dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
+ ring_size = MTK_WED_TX_BM_PKT_CNT;
+ }
+ n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;
page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
- dev->tx_buf_ring.size = ring_size;
dev->tx_buf_ring.pages = page_list;
- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
- &desc_phys, GFP_KERNEL);
- if (!desc)
+ desc_ptr = dma_alloc_coherent(dev->hw->dev,
+ dev->tx_buf_ring.size * desc_size,
+ &desc_phys, GFP_KERNEL);
+ if (!desc_ptr)
return -ENOMEM;
- dev->tx_buf_ring.desc = desc;
+ dev->tx_buf_ring.desc = desc_ptr;
dev->tx_buf_ring.desc_phys = desc_phys;
- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
dma_addr_t page_phys, buf_phys;
struct page *page;
void *buf;
@@ -343,7 +681,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
return -ENOMEM;
}
- page_list[page_idx++] = page;
+ page_list[page_idx].p = page;
+ page_list[page_idx++].phy_addr = page_phys;
dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -351,28 +690,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
buf_phys = page_phys;
for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
- u32 txd_size;
- u32 ctrl;
-
- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+ struct mtk_wdma_desc *desc = desc_ptr;
desc->buf0 = cpu_to_le32(buf_phys);
- desc->buf1 = cpu_to_le32(buf_phys + txd_size);
-
- if (dev->hw->version == 1)
- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG1;
- else
- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
- MTK_WED_BUF_SIZE - txd_size) |
- MTK_WDMA_DESC_CTRL_LAST_SEG0;
- desc->ctrl = cpu_to_le32(ctrl);
- desc->info = 0;
- desc++;
-
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ u32 txd_size, ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys,
+ token++);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size);
+ if (mtk_wed_is_v1(dev->hw))
+ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size);
+ else
+ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size);
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ } else {
+ desc->ctrl = cpu_to_le32(token << 16);
+ }
+
+ desc_ptr += desc_size;
buf += MTK_WED_BUF_SIZE;
buf_phys += MTK_WED_BUF_SIZE;
}
@@ -387,42 +729,103 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
- struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
- void **page_list = dev->tx_buf_ring.pages;
- int page_idx;
- int i;
+ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+ struct mtk_wed_hw *hw = dev->hw;
+ int i, page_idx = 0;
if (!page_list)
return;
- if (!desc)
+ if (!dev->tx_buf_ring.desc)
goto free_pagelist;
- for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
- i += MTK_WED_BUF_PER_PAGE) {
- void *page = page_list[page_idx++];
- dma_addr_t buf_addr;
+ for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phy = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
if (!page)
break;
- buf_addr = le32_to_cpu(desc[i].buf0);
- dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
DMA_BIDIRECTIONAL);
__free_page(page);
}
- dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
- desc, dev->tx_buf_ring.desc_phys);
+ dma_free_coherent(dev->hw->dev,
+ dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
+ dev->tx_buf_ring.desc,
+ dev->tx_buf_ring.desc_phys);
free_pagelist:
kfree(page_list);
}
static int
+mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
+{
+ int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
+ struct mtk_wed_buf *page_list;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ int i, page_idx = 0;
+
+ if (!dev->wlan.hw_rro)
+ return 0;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ dev->hw_rro.pages = page_list;
+ desc = dma_alloc_coherent(dev->hw->dev,
+ dev->wlan.rx_nbuf * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->hw_rro.desc = desc;
+ dev->hw_rro.desc_phys = desc_phys;
+
+ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ int s;
+
+ page = __dev_alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx].p = page;
+ page_list[page_idx++].phy_addr = page_phys;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf_phys = page_phys;
+ for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
+ desc->buf0 = cpu_to_le32(buf_phys);
+ buf_phys += MTK_WED_PAGE_BUF_SIZE;
+ desc++;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
- struct mtk_rxbm_desc *desc;
+ struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
@@ -436,13 +839,48 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
dev->rx_buf_ring.desc_phys = desc_phys;
dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
- return 0;
+ return mtk_wed_hwrro_buffer_alloc(dev);
+}
+
+static void
+mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_buf *page_list = dev->hw_rro.pages;
+ struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
+ int i, page_idx = 0;
+
+ if (!dev->wlan.hw_rro)
+ return;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
+ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
+ desc, dev->hw_rro.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
}
static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
- struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
+ struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;
if (!desc)
return;
@@ -450,6 +888,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
dev->wlan.release_rx_buf(dev);
dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
desc, dev->rx_buf_ring.desc_phys);
+
+ mtk_wed_hwrro_free_buffer(dev);
+}
+
+static void
+mtk_wed_hwrro_init(struct mtk_wed_device *dev)
+{
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
+ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
+ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
+
+ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
+ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
+ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
+ MTK_WED_RX_PG_BM_CNT));
+
+ /* enable rx_page_bm to fetch dmad */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
}
static void
@@ -463,6 +923,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+
+ mtk_wed_hwrro_init(dev);
}
static void
@@ -498,13 +960,23 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
- if (dev->hw->version == 1)
+ switch (dev->hw->version) {
+ case 1:
mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
- else
+ break;
+ case 2:
mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+ break;
+ case 3:
+ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ break;
+ default:
+ break;
+ }
if (!dev->hw->num_flows)
mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
@@ -516,6 +988,9 @@ mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
+ if (!mtk_wed_is_v2(dev->hw))
+ return;
+
if (enable) {
wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
wed_w32(dev, MTK_WED_TXP_DW1,
@@ -527,22 +1002,15 @@ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
}
}
-#define MTK_WFMDA_RX_DMA_EN BIT(2)
-static void
-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+static int
+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
+ struct mtk_wed_ring *ring)
{
- u32 val;
int i;
- if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
- return; /* queue is not configured by mt76 */
-
for (i = 0; i < 3; i++) {
- u32 cur_idx;
+ u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
- cur_idx = wed_r32(dev,
- MTK_WED_WPDMA_RING_RX_DATA(idx) +
- MTK_WED_RING_OFS_CPU_IDX);
if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
break;
@@ -551,12 +1019,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
if (i == 3) {
dev_err(dev->hw->dev, "rx dma enable failed\n");
- return;
+ return -ETIMEDOUT;
}
- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
- MTK_WFMDA_RX_DMA_EN;
- wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
+ return 0;
}
static void
@@ -577,7 +1043,7 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
wdma_clr(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
@@ -590,6 +1056,14 @@ mtk_wed_dma_disable(struct mtk_wed_device *dev)
MTK_WED_WPDMA_RX_D_RX_DRV_EN);
wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+ if (mtk_wed_is_v3_or_greater(dev->hw) &&
+ mtk_wed_get_rx_capa(dev)) {
+ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
+ MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
+ MTK_WDMA_PREF_RX_CFG_PREF_EN);
+ }
}
mtk_wed_set_512_support(dev, false);
@@ -606,7 +1080,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
- if (dev->hw->version == 1)
+ if (!mtk_wed_get_rx_capa(dev))
return;
wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
@@ -625,13 +1099,21 @@ mtk_wed_deinit(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- if (dev->hw->version == 1)
+ if (mtk_wed_is_v1(dev->hw))
return;
wed_clr(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_EN |
MTK_WED_CTRL_WED_RX_BM_EN |
MTK_WED_CTRL_RX_RRO_QM_EN);
+
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
+ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
+ }
}
static void
@@ -643,6 +1125,7 @@ __mtk_wed_detach(struct mtk_wed_device *dev)
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_amsdu_free_buffer(dev);
mtk_wed_free_tx_buffer(dev);
mtk_wed_free_tx_rings(dev);
@@ -681,21 +1164,37 @@ mtk_wed_detach(struct mtk_wed_device *dev)
mutex_unlock(&hw_lock);
}
-#define PCIE_BASE_ADDR0 0x11280000
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
switch (dev->wlan.bus_type) {
case MTK_WED_BUS_PCIE: {
struct device_node *np = dev->hw->eth->dev->of_node;
- struct regmap *regs;
- regs = syscon_regmap_lookup_by_phandle(np,
- "mediatek,wed-pcie");
- if (IS_ERR(regs))
- break;
+ if (mtk_wed_is_v2(dev->hw)) {
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
- regmap_update_bits(regs, 0, BIT(0), BIT(0));
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+ }
+
+ if (dev->wlan.msi) {
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
+ dev->hw->pcie_base | 0xc08);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ dev->hw->pcie_base | 0xc04);
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
+ } else {
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
+ dev->hw->pcie_base | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ dev->hw->pcie_base | 0x184);
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ }
wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
@@ -703,19 +1202,9 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
/* pcie interrupt control: pola/source selection */
wed_set(dev, MTK_WED_PCIE_INT_CTRL,
MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
-
- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
-
- /* pcie interrupt status trigger register */
- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
-
- /* pola setting */
- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
+ dev->hw->index));
break;
}
case MTK_WED_BUS_AXI:
@@ -731,38 +1220,55 @@ mtk_wed_bus_init(struct mtk_wed_device *dev)
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
- if (dev->hw->version == 1) {
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
- } else {
- mtk_wed_bus_init(dev);
+ int i;
- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
- wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ return;
}
+
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+
+ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+
+ if (!dev->wlan.hw_rro)
+ return;
+
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
+ dev->wlan.wpdma_rx_pg + i * 0x10);
}
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
- u32 mask, set;
+ u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
+ u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
mtk_wed_deinit(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
mtk_wed_set_wpdma(dev);
- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ }
wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
u32 offset = dev->hw->index ? 0x04000400 : 0;
wdma_set(dev, MTK_WDMA_GLO_CFG,
@@ -907,11 +1413,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
}
/* configure RX_ROUTE_QM */
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ if (mtk_wed_is_v2(dev->hw)) {
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
+ 0x3 + dev->hw->index));
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ } else {
+ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
+ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
+ 0x3 + dev->hw->index));
+ }
/* enable RX_ROUTE_QM */
wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}
@@ -924,34 +1437,30 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->init_done = true;
mtk_wed_set_ext_int(dev, false);
- wed_w32(dev, MTK_WED_TX_BM_CTRL,
- MTK_WED_TX_BM_CTRL_PAUSE |
- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
- dev->tx_buf_ring.size / 128) |
- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
- MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
-
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
- if (dev->hw->version == 1) {
- wed_w32(dev, MTK_WED_TX_BM_TKID,
- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
- dev->wlan.token_start) |
- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
- dev->wlan.token_start +
- dev->wlan.nbuf - 1));
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
MTK_WED_TX_BM_DYN_THR_HI);
- } else {
- wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
- dev->wlan.token_start) |
- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
- dev->wlan.token_start +
- dev->wlan.nbuf - 1));
+ } else if (mtk_wed_is_v2(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
MTK_WED_TX_BM_DYN_THR_HI_V2);
@@ -961,31 +1470,71 @@ mtk_wed_hw_init(struct mtk_wed_device *dev)
dev->tx_buf_ring.size / 128) |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
dev->tx_buf_ring.size / 128));
- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
- MTK_WED_TX_TKID_DYN_THR_HI);
}
+ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start + dev->wlan.nbuf - 1));
+
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* switch to new bm architecture */
+ wed_clr(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_LEGACY_EN);
+
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
+ dev->wlan.nbuf / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
+ dev->wlan.nbuf / 128));
+ /* return SKBID + SDP back to bm */
+ wed_set(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
+
+ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
+ MTK_WED_TX_BM_PKT_CNT |
+ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
+ }
+
+ if (mtk_wed_is_v1(dev->hw)) {
wed_set(dev, MTK_WED_CTRL,
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- } else {
- wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ } else if (mtk_wed_get_rx_capa(dev)) {
/* rx hw init */
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ /* reset prefetch index of ring */
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
+ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
+
+ /* reset prefetch FIFO of ring */
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
+ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
+
mtk_wed_rx_buffer_hw_init(dev);
mtk_wed_rro_hw_init(dev);
mtk_wed_route_qm_hw_init(dev);
}
wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+ if (!mtk_wed_is_v1(dev->hw))
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
}
static void
@@ -1008,23 +1557,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
}
}
-static u32
-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
- return !!(wed_r32(dev, reg) & mask);
-}
-
-static int
-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
-{
- int sleep = 15000;
- int timeout = 100 * sleep;
- u32 val;
-
- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
- timeout, false, dev, reg, mask);
-}
-
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
@@ -1038,13 +1570,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
if (ret)
return ret;
+ if (dev->wlan.hw_rro) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
+ MTK_WED_RX_IND_CMD_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
+ }
+
wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_BUSY);
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
} else {
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* 1.a. disable prefetch HW */
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_BUSY);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
+ }
+
wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
@@ -1072,23 +1624,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
}
+ if (dev->wlan.hw_rro) {
+ /* disable rro msdu page drv */
+ wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+ /* disable rro data drv */
+ wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
+
+ /* rro msdu page drv reset */
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+ /* rro data drv reset */
+ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
+ MTK_WED_RRO_RX_D_DRV_CLR);
+ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
+ MTK_WED_RRO_RX_D_DRV_CLR);
+ }
+
/* reset route qm */
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
- if (ret)
+ if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
- else
- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
- MTK_WED_RTQM_Q_RST);
+ } else if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
+ wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ } else {
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ }
/* reset tx wdma */
mtk_wdma_tx_reset(dev);
/* reset tx wdma drv */
wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
- mtk_wed_poll_busy(dev, MTK_WED_CTRL,
- MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
+ MTK_WED_WPDMA_STATUS_TX_DRV);
+ else
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
/* reset wed rx dma */
@@ -1098,13 +1679,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
if (ret) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
} else {
- struct mtk_eth *eth = dev->hw->eth;
-
- if (mtk_is_netsys_v2_or_greater(eth))
- wed_set(dev, MTK_WED_RESET_IDX,
- MTK_WED_RESET_IDX_RX_V2);
- else
- wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
+ wed_set(dev, MTK_WED_RESET_IDX,
+ dev->hw->soc->regmap.reset_idx_rx_mask);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
@@ -1114,6 +1690,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
MTK_WED_CTRL_WED_RX_BM_BUSY);
mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+ if (dev->wlan.hw_rro) {
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
+ wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
+ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
+ }
+
/* wo change to enable state */
val = MTK_WED_WO_STATE_ENABLE;
ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
@@ -1131,6 +1715,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *dev)
false);
}
mtk_wed_free_rx_buffer(dev);
+ mtk_wed_hwrro_free_buffer(dev);
return 0;
}
@@ -1157,21 +1742,48 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
} else {
- wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
+ wed_w32(dev, MTK_WED_RESET_IDX,
+ dev->hw->soc->regmap.reset_idx_tx_mask);
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
/* 2. reset WDMA rx DMA */
busy = !!mtk_wdma_rx_reset(dev);
- wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
+ wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
+ val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
+ wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
+ } else {
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ }
+
if (!busy)
busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
+ if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_BUSY);
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
} else {
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* 1.a. disable prefetch HW */
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_BUSY);
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
+
+ /* 2. Reset dma index */
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX_ALL);
+ }
+
wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
@@ -1187,8 +1799,13 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
for (i = 0; i < 100; i++) {
- val = wed_r32(dev, MTK_WED_TX_BM_INTF);
- if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ if (mtk_wed_is_v1(dev->hw))
+ val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
+ wed_r32(dev, MTK_WED_TX_BM_INTF));
+ else
+ val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
+ wed_r32(dev, MTK_WED_TX_TKID_INTF));
+ if (val == 0x40)
break;
}
@@ -1210,6 +1827,8 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
} else {
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
MTK_WED_WPDMA_RESET_IDX_TX |
@@ -1218,7 +1837,7 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
}
dev->init_done = false;
- if (dev->hw->version == 1)
+ if (mtk_wed_is_v1(dev->hw))
return;
if (!busy) {
@@ -1226,7 +1845,14 @@ mtk_wed_reset_dma(struct mtk_wed_device *dev)
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
- mtk_wed_rx_reset(dev);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ /* reset amsdu engine */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
+ }
+
+ if (mtk_wed_get_rx_capa(dev))
+ mtk_wed_rx_reset(dev);
}
static int
@@ -1249,7 +1875,6 @@ static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->rx_wdma))
@@ -1257,7 +1882,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
wdma = &dev->rx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- desc_size, true))
+ dev->hw->soc->wdma_desc_size, true))
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
@@ -1278,7 +1903,6 @@ static int
mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
bool reset)
{
- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
if (idx >= ARRAY_SIZE(dev->tx_wdma))
@@ -1286,9 +1910,27 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
wdma = &dev->tx_wdma[idx];
if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
- desc_size, true))
+ dev->hw->soc->wdma_desc_size, true))
return -ENOMEM;
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ struct mtk_wdma_desc *desc = wdma->desc;
+ int i;
+
+ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
+ desc++;
+ desc->buf0 = 0;
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ desc->buf1 = 0;
+ desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
+ desc++;
+ }
+ }
+
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
@@ -1344,7 +1986,7 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
MTK_WED_CTRL_WED_TX_BM_EN |
MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
MTK_WED_PCIE_INT_TRIGGER_STATUS);
@@ -1354,8 +1996,9 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
- wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
- GENMASK(1, 0));
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
+
/* initial tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
@@ -1374,15 +2017,20 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
- dev->wlan.rx_tbit[0]) |
- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
- dev->wlan.rx_tbit[1]));
+ if (mtk_wed_get_rx_capa(dev)) {
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+ dev->wlan.rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+ dev->wlan.rx_tbit[1]));
+
+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+ GENMASK(1, 0));
+ }
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
@@ -1398,55 +2046,280 @@ mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
}
+#define MTK_WFMDA_RX_DMA_EN BIT(2)
static void
mtk_wed_dma_enable(struct mtk_wed_device *dev)
{
- wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ int i;
+
+ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+ wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ } else {
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
+ }
wed_set(dev, MTK_WED_GLO_CFG,
MTK_WED_GLO_CFG_TX_DMA_EN |
MTK_WED_GLO_CFG_RX_DMA_EN);
- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
wed_set(dev, MTK_WED_WDMA_GLO_CFG,
MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
- wdma_set(dev, MTK_WDMA_GLO_CFG,
- MTK_WDMA_GLO_CFG_TX_DMA_EN |
- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
-
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
- } else {
- int i;
+ return;
+ }
- wed_set(dev, MTK_WED_WPDMA_CTRL,
- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
+ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
- MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
- MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
+ wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ }
- wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
- MTK_WED_WPDMA_RX_D_RX_DRV_EN |
- FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
- 0x2));
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));
+
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
+ MTK_WED_WPDMA_RX_D_PREF_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
+ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ }
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_ring[i];
+ u32 val;
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue; /* queue is not configured by mt76 */
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
+ dev_err(dev->hw->dev,
+ "rx_ring(%d) dma enable failed\n", i);
+ continue;
+ }
+
+ val = wifi_r32(dev,
+ dev->wlan.wpdma_rx_glo -
+ dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
+ wifi_w32(dev,
+ dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
+ val);
+ }
+}
+
+static void
+mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
+{
+ int i;
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
+ if (reset) {
+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+ return;
+ }
+
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
+ dev->wlan.rro_rx_tbit[1]));
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
+ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[1]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
+ dev->wlan.rx_pg_tbit[2]));
+
+ /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
+ * WM FWDL has completed, otherwise the RRO_MSDU_PG ring may be broken
+ */
+ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_EN);
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+ dev_err(dev->hw->dev,
+ "rx_rro_ring(%d) initialization failed\n", i);
+ }
+
+ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
+
+ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
+ continue;
+
+ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
+ dev_err(dev->hw->dev,
+ "rx_page_ring(%d) initialization failed\n", i);
+ }
+}
+
+static void
+mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
+ void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static void
+mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
+
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
+ readl(regs));
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+}
+
+static int
+mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
+ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
+ int i, count = 0;
- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
- mtk_wed_check_wfdma_rx_fill(dev, i);
+ ring->wpdma = regs;
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
+ readl(regs) & 0xfffffff0);
+
+ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
+ readl(regs + MTK_WED_RING_OFS_COUNT));
+
+ /* ack sn cr */
+ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
+ dev->wlan.ind_cmd.ack_sn_addr);
+ wed_w32(dev, MTK_WED_RRO_CFG1,
+ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
+ dev->wlan.ind_cmd.win_size) |
+ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
+ dev->wlan.ind_cmd.particular_sid));
+
+ /* particular session addr element */
+ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
+ dev->wlan.ind_cmd.particular_se_phys);
+
+ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
+ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
+ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
+ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
+ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
+
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
+ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
+ if (count >= 100)
+ dev_err(dev->hw->dev,
+ "write ba session base failed\n");
+ }
+
+ /* pn check init */
+ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
+ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
+ MTK_WED_PN_CHECK_IS_FIRST);
+
+ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
+ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
+
+ count = 0;
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
+ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
+ if (count >= 100)
+ dev_err(dev->hw->dev,
+ "session(%d) initialization failed\n", i);
}
+
+ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
+
+ return 0;
}
static void
@@ -1466,14 +2339,14 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
mtk_wed_set_ext_int(dev, true);
- if (dev->hw->version == 1) {
+ if (mtk_wed_is_v1(dev->hw)) {
u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
dev->hw->index);
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
- } else {
+ } else if (mtk_wed_get_rx_capa(dev)) {
/* driver sets mid ready, and only once */
wed_w32(dev, MTK_WED_EXT_INT_MASK1,
MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
@@ -1483,12 +2356,18 @@ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
wed_r32(dev, MTK_WED_EXT_INT_MASK1);
wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+ if (mtk_wed_is_v3_or_greater(dev->hw)) {
+ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
+ }
+
if (mtk_wed_rro_cfg(dev))
return;
-
}
mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+ mtk_wed_amsdu_init(dev);
mtk_wed_dma_enable(dev);
dev->running = true;
@@ -1535,6 +2414,7 @@ mtk_wed_attach(struct mtk_wed_device *dev)
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
dev->version = hw->version;
+ dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
if (hw->eth->dma_dev == hw->eth->dev &&
of_dma_is_coherent(hw->eth->dev->of_node))
@@ -1544,6 +2424,10 @@ mtk_wed_attach(struct mtk_wed_device *dev)
if (ret)
goto out;
+ ret = mtk_wed_amsdu_buffer_alloc(dev);
+ if (ret)
+ goto out;
+
if (mtk_wed_get_rx_capa(dev)) {
ret = mtk_wed_rro_alloc(dev);
if (ret)
@@ -1551,13 +2435,14 @@ mtk_wed_attach(struct mtk_wed_device *dev)
}
mtk_wed_hw_init_early(dev);
- if (hw->version == 1) {
+ if (mtk_wed_is_v1(hw))
regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
BIT(hw->index), 0);
- } else {
+ else
dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
+
+ if (mtk_wed_get_rx_capa(dev))
ret = mtk_wed_wo_init(hw);
- }
out:
if (ret) {
dev_err(dev->hw->dev, "failed to attach wed device\n");
@@ -1601,6 +2486,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
ring->reg_base = MTK_WED_RING_TX(idx);
ring->wpdma = regs;
+ if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
+ /* reset prefetch index */
+ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
+
+ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
+ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
+ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
+
+ /* reset prefetch FIFO */
+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
+ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
+ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
+ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
+ }
+
/* WED -> WPDMA */
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
@@ -1619,7 +2521,7 @@ static int
mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
{
struct mtk_wed_ring *ring = &dev->txfree_ring;
- int i, index = dev->hw->version == 1;
+ int i, index = mtk_wed_is_v1(dev->hw);
/*
* For txfree event handling, the same DMA ring is shared between WED
@@ -1675,15 +2577,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
- u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+ u32 val, ext_mask;
- if (dev->hw->version == 1)
- ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ if (mtk_wed_is_v3_or_greater(dev->hw))
+ ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
else
- ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
- MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+ ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
@@ -1713,19 +2613,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
int mtk_wed_flow_add(int index)
{
struct mtk_wed_hw *hw = hw_list[index];
- int ret;
+ int ret = 0;
- if (!hw || !hw->wed_dev)
- return -ENODEV;
+ mutex_lock(&hw_lock);
- if (hw->num_flows) {
- hw->num_flows++;
- return 0;
+ if (!hw || !hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
}
- mutex_lock(&hw_lock);
- if (!hw->wed_dev) {
- ret = -ENODEV;
+ if (!hw->wed_dev->wlan.offload_enable)
+ goto out;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
goto out;
}
@@ -1744,14 +2645,15 @@ void mtk_wed_flow_remove(int index)
{
struct mtk_wed_hw *hw = hw_list[index];
- if (!hw)
- return;
+ mutex_lock(&hw_lock);
- if (--hw->num_flows)
- return;
+ if (!hw || !hw->wed_dev)
+ goto out;
- mutex_lock(&hw_lock);
- if (!hw->wed_dev)
+ if (!hw->wed_dev->wlan.offload_disable)
+ goto out;
+
+ if (--hw->num_flows)
goto out;
hw->wed_dev->wlan.offload_disable(hw->wed_dev);
@@ -1842,7 +2744,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
{
struct mtk_wed_hw *hw = wed->hw;
- if (hw->version < 2)
+ if (mtk_wed_is_v1(hw))
return -EOPNOTSUPP;
switch (type) {
@@ -1874,6 +2776,10 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
.setup_tc = mtk_wed_setup_tc,
+ .start_hw_rro = mtk_wed_start_hw_rro,
+ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
+ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
+ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
};
struct device_node *eth_np = eth->dev->of_node;
struct platform_device *pdev;
@@ -1916,9 +2822,17 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
- hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
+ hw->version = eth->soc->version;
- if (hw->version == 1) {
+ switch (hw->version) {
+ case 2:
+ hw->soc = &mt7986_data;
+ break;
+ case 3:
+ hw->soc = &mt7988_data;
+ break;
+ default:
+ case 1:
hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
"mediatek,pcie-mirror");
hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
@@ -1932,6 +2846,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
regmap_write(hw->mirror, 0, 0);
regmap_write(hw->mirror, 4, 0);
}
+ hw->soc = &mt7622_data;
+ break;
}
mtk_wed_hw_add_debugfs(hw);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index 43ab77eaf683..c1f0479d7a71 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -9,10 +9,29 @@
#include <linux/regmap.h>
#include <linux/netdevice.h>
+#include "mtk_wed_regs.h"
+
struct mtk_eth;
struct mtk_wed_wo;
+struct mtk_wed_soc_data {
+ struct {
+ u32 tx_bm_tkid;
+ u32 wpdma_rx_ring0;
+ u32 reset_idx_tx_mask;
+ u32 reset_idx_rx_mask;
+ } regmap;
+ u32 tx_ring_desc_size;
+ u32 wdma_desc_size;
+};
+
+struct mtk_wed_amsdu {
+ void *txd;
+ dma_addr_t txd_phy;
+};
+
struct mtk_wed_hw {
+ const struct mtk_wed_soc_data *soc;
struct device_node *node;
struct mtk_eth *eth;
struct regmap *regs;
@@ -24,6 +43,8 @@ struct mtk_wed_hw {
struct dentry *debugfs_dir;
struct mtk_wed_device *wed_dev;
struct mtk_wed_wo *wed_wo;
+ struct mtk_wed_amsdu *wed_amsdu;
+ u32 pcie_base;
u32 debugfs_reg;
u32 num_flows;
u8 version;
@@ -37,9 +58,30 @@ struct mtk_wdma_info {
u8 queue;
u16 wcid;
u8 bss;
+ u8 amsdu;
};
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw)
+{
+ return hw->version == 1;
+}
+
+static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw)
+{
+ return hw->version == 2;
+}
+
+static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw)
+{
+ return hw->version == 3;
+}
+
+static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw)
+{
+ return hw->version > 2;
+}
+
static inline void
wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
{
@@ -122,6 +164,21 @@ wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
writel(val, dev->txfree_ring.wpdma + reg);
}
+static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev)
+{
+ if (!mtk_wed_is_v3_or_greater(dev->hw))
+ return MTK_WED_PCIE_BASE;
+
+ switch (dev->hw->index) {
+ case 1:
+ return MTK_WED_PCIE_BASE1;
+ case 2:
+ return MTK_WED_PCIE_BASE2;
+ default:
+ return MTK_WED_PCIE_BASE0;
+ }
+}
+
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
index e24afeaea0da..781c691473e1 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -11,6 +11,7 @@ struct reg_dump {
u16 offset;
u8 type;
u8 base;
+ u32 mask;
};
enum {
@@ -25,6 +26,8 @@ enum {
#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_REG_MASK(_reg, _mask) \
+ { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
#define DUMP_RING(_prefix, _base, ...) \
{ _prefix " BASE", _base, __VA_ARGS__ }, \
{ _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
@@ -32,6 +35,7 @@ enum {
{ _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
@@ -151,7 +155,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
static int
wed_rxinfo_show(struct seq_file *s, void *data)
{
- static const struct reg_dump regs[] = {
+ static const struct reg_dump regs_common[] = {
DUMP_STR("WPDMA RX"),
DUMP_WPDMA_RX_RING(0),
DUMP_WPDMA_RX_RING(1),
@@ -169,7 +173,7 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED_RING(WED_RING_RX_DATA(0)),
DUMP_WED_RING(WED_RING_RX_DATA(1)),
- DUMP_STR("WED RRO"),
+ DUMP_STR("WED WO RRO"),
DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
DUMP_WED(WED_RROQM_MID_MIB),
DUMP_WED(WED_RROQM_MOD_MIB),
@@ -180,17 +184,6 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
- DUMP_STR("WED Route QM"),
- DUMP_WED(WED_RTQM_R2H_MIB(0)),
- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
- DUMP_WED(WED_RTQM_R2H_MIB(1)),
- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
- DUMP_WED(WED_RTQM_Q2N_MIB),
- DUMP_WED(WED_RTQM_Q2B_MIB),
- DUMP_WED(WED_RTQM_PFDBK_MIB),
-
DUMP_STR("WED WDMA TX"),
DUMP_WED(WED_WDMA_TX_MIB),
DUMP_WED_RING(WED_WDMA_RING_TX),
@@ -211,6 +204,287 @@ wed_rxinfo_show(struct seq_file *s, void *data)
DUMP_WED(WED_RX_BM_INTF),
DUMP_WED(WED_RX_BM_ERR_STS),
};
+ static const struct reg_dump regs_wed_v2[] = {
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2N_MIB),
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+ };
+ static const struct reg_dump regs_wed_v3[] = {
+ DUMP_STR("WED RX RRO DATA"),
+ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
+ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
+
+ DUMP_STR("WED RX MSDU PAGE"),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
+ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
+
+ DUMP_STR("WED RX IND CMD"),
+ DUMP_WED(WED_IND_CMD_RX_CTRL1),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
+ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
+ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
+ WED_IND_CMD_PREFETCH_FREE_CNT),
+ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
+
+ DUMP_STR("WED ADDR ELEM"),
+ DUMP_WED(WED_ADDR_ELEM_CFG0),
+ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
+ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
+
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
+ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
+ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
+ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
+
+ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
+ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
+ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
+ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
+ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
+ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev) {
+ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
+ if (mtk_wed_is_v2(hw))
+ dump_wed_regs(s, dev,
+ regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
+ else
+ dump_wed_regs(s, dev,
+ regs_wed_v3, ARRAY_SIZE(regs_wed_v3));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+static int
+wed_amsdu_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED AMDSU INFO"),
+ DUMP_WED(WED_MON_AMSDU_FIFO_DMAD),
+
+ DUMP_STR("WED AMDSU ENG0 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG1 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG2 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG3 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG4 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG5 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG6 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG7 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED AMDSU ENG8 INFO"),
+ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)),
+ DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
+ WED_AMSDU_ENG_MAX_PL_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
+ WED_AMSDU_ENG_MAX_QGPP_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
+ WED_AMSDU_ENG_CUR_ENTRY),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
+ WED_AMSDU_ENG_MAX_BUF_MERGED),
+ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
+ WED_AMSDU_ENG_MAX_MSDU_MERGED),
+
+ DUMP_STR("WED QMEM INFO"),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT),
+
+ DUMP_STR("WED QMEM HEAD INFO"),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD),
+
+ DUMP_STR("WED QMEM TAIL INFO"),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL),
+ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL),
+
+ DUMP_STR("WED HIFTXD MSDU INFO"),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)),
+ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)),
+ };
struct mtk_wed_hw *hw = s->private;
struct mtk_wed_device *dev = hw->wed_dev;
@@ -219,7 +493,94 @@ wed_rxinfo_show(struct seq_file *s, void *data)
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+DEFINE_SHOW_ATTRIBUTE(wed_amsdu);
+
+static int
+wed_rtqm_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
+ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
+
+ DUMP_STR("WED Route QM IGRS1(Legacy)"),
+ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
+
+ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
+ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
+
+ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
+ DUMP_WED(WED_RTQM_IGRS3_I2HW_DMAD_CNT),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
+ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
+ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
+
+static int
+wed_rro_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("RRO/IND CMD CNT"),
+ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
+ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
+ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
+ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
+
+ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
+ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
+ WED_ADDR_ELEM_SIG_FAIL_CNT),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
+ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
+ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
+ WED_PN_CHK_FAIL_CNT),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rro);
static int
mtk_wed_reg_set(void *data, u64 val)
@@ -261,7 +622,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
- if (hw->version != 1)
+ if (!mtk_wed_is_v1(hw)) {
debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
&wed_rxinfo_fops);
+ if (mtk_wed_is_v3_or_greater(hw)) {
+ debugfs_create_file_unsafe("amsdu", 0400, dir, hw,
+ &wed_amsdu_fops);
+ debugfs_create_file_unsafe("rtqm", 0400, dir, hw,
+ &wed_rtqm_fops);
+ debugfs_create_file_unsafe("rro", 0400, dir, hw,
+ &wed_rro_fops);
+ }
+ }
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
index 071ed3dea860..ea0884186d76 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -16,14 +16,30 @@
#include "mtk_wed_wo.h"
#include "mtk_wed.h"
-static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+static struct mtk_wed_wo_memory_region mem_region[] = {
+ [MTK_WED_WO_REGION_EMI] = {
+ .name = "wo-emi",
+ },
+ [MTK_WED_WO_REGION_ILM] = {
+ .name = "wo-ilm",
+ },
+ [MTK_WED_WO_REGION_DATA] = {
+ .name = "wo-data",
+ .shared = true,
+ },
+ [MTK_WED_WO_REGION_BOOT] = {
+ .name = "wo-boot",
+ },
+};
+
+static u32 wo_r32(u32 reg)
{
- return readl(wo->boot.addr + reg);
+ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
-static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+static void wo_w32(u32 reg, u32 val)
{
- writel(val, wo->boot.addr + reg);
+ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static struct sk_buff *
@@ -68,6 +84,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
struct mtk_wed_wo_rx_stats *stats;
int i;
+ if (!wed->wlan.update_wo_rx_stats)
+ return;
+
if (count * sizeof(*stats) > skb->len - sizeof(u32))
return;
@@ -204,7 +223,7 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
{
struct mtk_wed_wo *wo = dev->hw->wed_wo;
- if (dev->hw->version == 1)
+ if (!mtk_wed_get_rx_capa(dev))
return 0;
if (WARN_ON(!wo))
@@ -215,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
}
static int
-mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
- int index;
- index = of_property_match_string(wo->hw->node, "memory-region-names",
- region->name);
- if (index < 0)
- return index;
-
- np = of_parse_phandle(wo->hw->node, "memory-region", index);
+ np = of_parse_phandle(hw->node, "memory-region", index);
if (!np)
return -ENODEV;
@@ -239,14 +252,13 @@ mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
region->phy_addr = rmem->base;
region->size = rmem->size;
- region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
+ region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
return !region->addr ? -EINVAL : 0;
}
static int
-mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
- struct mtk_wed_wo_memory_region *region)
+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw)
{
const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
const struct mtk_wed_fw_trailer *trailer;
@@ -259,50 +271,46 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
while (region_ptr < trailer_ptr) {
u32 length;
+ int i;
fw_region = (const struct mtk_wed_fw_region *)region_ptr;
length = le32_to_cpu(fw_region->len);
-
- if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ if (first_region_ptr < ptr + length)
goto next;
- if (region->size < length)
- goto next;
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ struct mtk_wed_wo_memory_region *region;
- if (first_region_ptr < ptr + length)
- goto next;
+ region = &mem_region[i];
+ if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ continue;
+
+ if (region->size < length)
+ continue;
- if (region->shared && region->consumed)
- return 0;
+ if (region->shared && region->consumed)
+ break;
- if (!region->shared || !region->consumed) {
- memcpy_toio(region->addr, ptr, length);
- region->consumed = true;
- return 0;
+ if (!region->shared || !region->consumed) {
+ memcpy_toio(region->addr, ptr, length);
+ region->consumed = true;
+ break;
+ }
}
+
+ if (i == ARRAY_SIZE(mem_region))
+ return -EINVAL;
next:
region_ptr += sizeof(*fw_region);
ptr += length;
}
- return -EINVAL;
+ return 0;
}
static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
- static struct mtk_wed_wo_memory_region mem_region[] = {
- [MTK_WED_WO_REGION_EMI] = {
- .name = "wo-emi",
- },
- [MTK_WED_WO_REGION_ILM] = {
- .name = "wo-ilm",
- },
- [MTK_WED_WO_REGION_DATA] = {
- .name = "wo-data",
- .shared = true,
- },
- };
const struct mtk_wed_fw_trailer *trailer;
const struct firmware *fw;
const char *fw_name;
@@ -311,25 +319,38 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
/* load firmware region metadata */
for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
+ int index = of_property_match_string(wo->hw->node,
+ "memory-region-names",
+ mem_region[i].name);
+ if (index < 0)
+ continue;
+
+ ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
if (ret)
return ret;
}
- wo->boot.name = "wo-boot";
- ret = mtk_wed_get_memory_region(wo, &wo->boot);
- if (ret)
- return ret;
-
/* set dummy cr */
wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
wo->hw->index + 1);
/* load firmware */
- if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
- fw_name = MT7981_FIRMWARE_WO;
- else
- fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
+ switch (wo->hw->version) {
+ case 2:
+ if (of_device_is_compatible(wo->hw->node,
+ "mediatek,mt7981-wed"))
+ fw_name = MT7981_FIRMWARE_WO;
+ else
+ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1
+ : MT7986_FIRMWARE_WO0;
+ break;
+ case 3:
+ fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1
+ : MT7988_FIRMWARE_WO0;
+ break;
+ default:
+ return -EINVAL;
+ }
ret = request_firmware(&fw, fw_name, wo->hw->dev);
if (ret)
@@ -343,23 +364,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
trailer->chip_id, trailer->num_region);
- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
- ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
- if (ret)
- goto out;
- }
+ ret = mtk_wed_mcu_run_firmware(wo, fw);
+ if (ret)
+ goto out;
/* set the start address */
- boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
- : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
- wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
+ boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
+ else
+ boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
/* wo firmware reset */
- wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+ wo_w32(MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
- val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
- val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
- : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
- wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ val = wo_r32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
+ MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
out:
release_firmware(fw);
@@ -393,3 +413,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
+MODULE_FIRMWARE(MT7988_FIRMWARE_WO0);
+MODULE_FIRMWARE(MT7988_FIRMWARE_WO1);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
index 47ea69feb3b2..c71190924816 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -13,6 +13,9 @@
#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
+#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
+
struct mtk_wdma_desc {
__le32 buf0;
__le32 ctrl;
@@ -25,6 +28,8 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET 0x008
#define MTK_WED_RESET_TX_BM BIT(0)
#define MTK_WED_RESET_RX_BM BIT(1)
+#define MTK_WED_RESET_RX_PG_BM BIT(2)
+#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
@@ -37,6 +42,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
#define MTK_WED_RESET_RX_RRO_QM BIT(20)
#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
+#define MTK_WED_RESET_TX_AMSDU BIT(22)
#define MTK_WED_RESET_WED BIT(31)
#define MTK_WED_CTRL 0x00c
@@ -44,6 +50,9 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
+#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
+#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7)
#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
@@ -54,9 +63,14 @@ struct mtk_wdma_desc {
#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
+#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
+#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
+#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22)
+#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23)
#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
#define MTK_WED_EXT_INT_STATUS 0x020
#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
@@ -64,8 +78,8 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
@@ -89,19 +103,26 @@ struct mtk_wdma_desc {
#define MTK_WED_EXT_INT_MASK 0x028
#define MTK_WED_EXT_INT_MASK1 0x02c
#define MTK_WED_EXT_INT_MASK2 0x030
+#define MTK_WED_EXT_INT_MASK3 0x034
#define MTK_WED_STATUS 0x060
#define MTK_WED_STATUS_TX GENMASK(15, 8)
+#define MTK_WED_WPDMA_STATUS 0x068
+#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
+
#define MTK_WED_TX_BM_CTRL 0x080
#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
+#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
#define MTK_WED_TX_BM_BASE 0x084
+#define MTK_WED_TX_BM_INIT_PTR 0x088
+#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
+#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
-#define MTK_WED_TX_BM_TKID 0x088
-#define MTK_WED_TX_BM_TKID_V2 0x0c8
#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
@@ -124,6 +145,12 @@ struct mtk_wdma_desc {
#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+#define MTK_WED_TX_TKID_INTF 0x0dc
+#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
+
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
+
#define MTK_WED_TX_TKID_DYN_THR 0x0e0
#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
@@ -160,9 +187,6 @@ struct mtk_wdma_desc {
#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
#define MTK_WED_RESET_IDX 0x20c
-#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
-#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
-#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
@@ -174,6 +198,7 @@ struct mtk_wdma_desc {
#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
#define MTK_WED_SCR0 0x3c0
+#define MTK_WED_RX1_CTRL2 0x418
#define MTK_WED_WPDMA_INT_TRIGGER 0x504
#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
@@ -204,12 +229,15 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
#define MTK_WED_WPDMA_RESET_IDX 0x50c
#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
@@ -255,9 +283,10 @@ struct mtk_wdma_desc {
#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
#define MTK_WED_PCIE_INT_CTRL 0x57c
-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
#define MTK_WED_WPDMA_CFG_BASE 0x580
#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
@@ -283,15 +312,30 @@ struct mtk_wdma_desc {
#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
-#define MTK_WED_WPDMA_RX_RING 0x770
#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
+#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
+#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
+
+#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
+#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
+
+#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
+
+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
+#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
+
#define MTK_WED_WDMA_RING_TX 0x800
#define MTK_WED_WDMA_TX_MIB 0x810
@@ -299,6 +343,20 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+#define MTK_WED_WDMA_RX_PREF_CFG 0x950
+#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
+#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
+#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
+#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
+#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
+#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
+#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
+#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
+
+#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
+#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
+#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
+
#define MTK_WED_WDMA_GLO_CFG 0xa04
#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
@@ -322,6 +380,7 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_RESET_IDX 0xa08
#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
#define MTK_WED_WDMA_INT_CLR 0xa24
@@ -331,6 +390,7 @@ struct mtk_wdma_desc {
#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
#define MTK_WED_WDMA_CFG_BASE 0xaa0
@@ -391,9 +451,62 @@ struct mtk_wdma_desc {
#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
+#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
+
+#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
+#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
+
#define MTK_WDMA_INT_GRP1 0x250
#define MTK_WDMA_INT_GRP2 0x254
+#define MTK_WDMA_PREF_TX_CFG 0x2d0
+#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
+#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
+
+#define MTK_WDMA_PREF_RX_CFG 0x2dc
+#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
+#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
+
+#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
+#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
+#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
+
+#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
+#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
+#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
+
+#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
+#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
+#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
+#define MTK_WDMA_WRBK_TX_CFG 0x300
+#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
+#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
+
+#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
+#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
+
+#define MTK_WDMA_WRBK_RX_CFG 0x344
+#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
+#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
+
+#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
+#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
+
+#define MTK_WDMA_WRBK_SIDX_CFG 0x388
+#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
+#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
@@ -407,6 +520,32 @@ struct mtk_wdma_desc {
#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+#define MTK_WED_RTQM_RST 0xb04
+
+#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
+#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
+#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
+
+#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
+#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
+#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
+
+#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
+#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
+#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
+
+#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
+#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
+#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4)
+#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
+
#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
#define MTK_WED_RTQM_Q2N_MIB 0xb80
@@ -415,6 +554,24 @@ struct mtk_wdma_desc {
#define MTK_WED_RTQM_Q2B_MIB 0xb8c
#define MTK_WED_RTQM_PFDBK_MIB 0xb90
+#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
+#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
+
+#define MTK_WED_RTQM_FDROP_MIB 0xb84
+#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
+#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
+#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
+#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
+#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
+#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
+
+#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
+#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
+#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
+#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
+#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
+#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
+
#define MTK_WED_RROQM_GLO_CFG 0xc04
#define MTK_WED_RROQM_RST_IDX 0xc08
#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
@@ -464,7 +621,195 @@ struct mtk_wdma_desc {
#define MTK_WED_RX_BM_INTF 0xd9c
#define MTK_WED_RX_BM_ERR_STS 0xda8
+#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
+#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
+#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
+
+#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
+#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
+#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
+#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
+
+#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
+#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
+#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
+#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
+
+#define MTK_WED_RRO_CFG0 0xe10
+#define MTK_WED_RRO_CFG1 0xe14
+#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
+#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
+#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
+
+#define MTK_WED_ADDR_ELEM_CFG0 0xe18
+#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
+#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
+
+#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
+#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
+#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
+#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
+#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
+#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
+
+#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
+#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
+
+#define MTK_WED_PN_CHECK_CFG 0xe30
+#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
+#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
+#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
+#define MTK_WED_PN_CHECK_RD BIT(30)
+#define MTK_WED_PN_CHECK_WR BIT(31)
+
+#define MTK_WED_PN_CHECK_WDATA_M 0xe38
+#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
+
+#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
+
+#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
+#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
+#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
+
+#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
+#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
+#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
+
+#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
+
+#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
+
+#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
+#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
+#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
+
+#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
+#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
+
+#define MTK_WED_RRO_PG_BM_BASE 0xeb4
+#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
+#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
+#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
+
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
+#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
+
+#define MTK_WED_RRO_RX_HW_STS 0xf00
+#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
+
+#define MTK_WED_RX_IND_CMD_CNT0 0xf20
+#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
+
+#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
+#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
+
+#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
+#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
+#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
+#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
+
+#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
+
+#define MTK_WED_RX_PN_CHK_CNT 0xf70
+#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
+
#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
#define MTK_WED_PCIE_INT_MASK 0x0
+#define MTK_WED_AMSDU_FIFO 0x1800
+#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10)
+
+#define MTK_WED_AMSDU_STA_INFO 0x01810
+#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0)
+#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1)
+
+#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814
+#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0)
+#define MTK_WED_AMSDU_STA_RMVL BIT(1)
+#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2)
+#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8)
+
+#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
+
+#define MTK_WED_AMSDU_PSE 0x1910
+#define MTK_WED_AMSDU_PSE_RESET BIT(16)
+
+#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968
+#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15)
+
+#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34
+
+#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
+#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
+
+#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
+#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
+#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
+
+#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
+#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
+#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
+#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
+
+#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04
+
+#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
+#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0)
+
+#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
+#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0)
+#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16)
+#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0)
+
+#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
+
+#define MTK_WED_PCIE_BASE 0x11280000
+#define MTK_WED_PCIE_BASE0 0x11300000
+#define MTK_WED_PCIE_BASE1 0x11310000
+#define MTK_WED_PCIE_BASE2 0x11290000
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index 7a1a2a28f1ac..87a67fa3868d 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx {
#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
+#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
+#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
#define MTK_WO_MCU_CFG_LS_BASE 0
#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
@@ -228,7 +230,6 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
- struct mtk_wed_wo_memory_region boot;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 332472fe4990..a09b6e05337d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -400,7 +400,7 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
for (ring = 0; ring < priv->rx_ring_num; ring++) {
if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
local_bh_disable();
- napi_reschedule(&priv->rx_cq[ring]->napi);
+ napi_schedule(&priv->rx_cq[ring]->napi);
local_bh_enable();
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index fe48d20d6118..0005d9e2c2d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1967,7 +1967,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DRIVER_VERSION_TO_FW) {
u8 *dst = (u8 *)(inbox + INIT_HCA_DRIVER_VERSION_OFFSET / 4);
- strncpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ - 1);
+ strscpy(dst, DRV_NAME_FOR_FW, INIT_HCA_DRIVER_VERSION_SZ);
mlx4_dbg(dev, "Reporting Driver Version to FW: %s\n", dst);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index c4f4de82e29e..685335832a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -189,3 +189,11 @@ config MLX5_SF_MANAGER
port is managed through devlink. A subfunction supports RDMA, netdevice
and vdpa device. It is similar to a SRIOV VF but it doesn't require
SRIOV support.
+
+config MLX5_DPLL
+ tristate "Mellanox 5th generation network adapters (ConnectX series) DPLL support"
+ depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ select DPLL
+ help
+ DPLL support in Mellanox Technologies ConnectX NICs.
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 7e94caca4888..c44870b175f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -128,3 +128,6 @@ mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o irq_
# SF manager
#
mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
+
+obj-$(CONFIG_MLX5_DPLL) += mlx5_dpll.o
+mlx5_dpll-y := dpll.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c22b0ad0c870..f8f0a712c943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -525,6 +525,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_SAVE_VHCA_STATE:
case MLX5_CMD_OP_LOAD_VHCA_STATE:
case MLX5_CMD_OP_SYNC_CRYPTO:
+ case MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -ENOLINK;
@@ -728,6 +729,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
+ MLX5_COMMAND_STR_CASE(ALLOW_OTHER_VHCA_ACCESS);
default: return "unknown command opcode";
}
}
@@ -2090,6 +2092,74 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_allow_other_vhca_access_attr *attr)
+{
+ u32 out[MLX5_ST_SZ_DW(allow_other_vhca_access_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(allow_other_vhca_access_in)] = {};
+ void *key;
+
+ MLX5_SET(allow_other_vhca_access_in,
+ in, opcode, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_type_to_be_accessed, attr->obj_type);
+ MLX5_SET(allow_other_vhca_access_in,
+ in, object_id_to_be_accessed, attr->obj_id);
+
+ key = MLX5_ADDR_OF(allow_other_vhca_access_in, in, access_key);
+ memcpy(key, attr->access_key, sizeof(attr->access_key));
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_alias_obj_in)] = {};
+ void *param;
+ void *attr;
+ void *key;
+ int ret;
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, hdr);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr,
+ attr, obj_type, alias_attr->obj_type);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, alias_object, 1);
+
+ attr = MLX5_ADDR_OF(create_alias_obj_in, in, alias_ctx);
+ MLX5_SET(alias_context, attr, vhca_id_to_be_accessed, alias_attr->vhca_id);
+ MLX5_SET(alias_context, attr, object_id_to_be_accessed, alias_attr->obj_id);
+
+ key = MLX5_ADDR_OF(alias_context, attr, access_key);
+ memcpy(key, alias_attr->access_key, sizeof(alias_attr->access_key));
+
+ ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (ret)
+ return ret;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return 0;
+}
+
+int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id,
+ u16 obj_type)
+{
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, obj_type);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
struct cmd_msg_cache *ch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 7909f378dc93..cf0477f53dc4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -38,8 +38,6 @@
#include "devlink.h"
#include "lag/lag.h"
-/* intf dev list mutex */
-static DEFINE_MUTEX(mlx5_intf_mutex);
static DEFINE_IDA(mlx5_adev_ida);
static bool is_eth_rep_supported(struct mlx5_core_dev *dev)
@@ -206,6 +204,19 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev)
return err ? false : val.vbool;
}
+static bool is_dpll_supported(struct mlx5_core_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MLX5_DPLL))
+ return false;
+
+ if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
+ mlx5_core_warn(dev, "Missing SyncE capability\n");
+ return false;
+ }
+
+ return true;
+}
+
enum {
MLX5_INTERFACE_PROTOCOL_ETH,
MLX5_INTERFACE_PROTOCOL_ETH_REP,
@@ -215,6 +226,8 @@ enum {
MLX5_INTERFACE_PROTOCOL_MPIB,
MLX5_INTERFACE_PROTOCOL_VNET,
+
+ MLX5_INTERFACE_PROTOCOL_DPLL,
};
static const struct mlx5_adev_device {
@@ -237,6 +250,8 @@ static const struct mlx5_adev_device {
.is_supported = &is_ib_rep_supported },
[MLX5_INTERFACE_PROTOCOL_MPIB] = { .suffix = "multiport",
.is_supported = &is_mp_supported },
+ [MLX5_INTERFACE_PROTOCOL_DPLL] = { .suffix = "dpll",
+ .is_supported = &is_dpll_supported },
};
int mlx5_adev_idx_alloc(void)
@@ -320,9 +335,9 @@ static void del_adev(struct auxiliary_device *adev)
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
{
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
@@ -338,7 +353,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
int ret = 0, i;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
@@ -383,7 +398,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
return ret;
}
@@ -396,7 +411,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
int i;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -426,7 +441,7 @@ skip_suspend:
priv->adev[i] = NULL;
}
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
int mlx5_register_device(struct mlx5_core_dev *dev)
@@ -434,10 +449,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
int ret;
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
ret = mlx5_rescan_drivers_locked(dev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
if (ret)
mlx5_unregister_device(dev);
@@ -447,10 +462,10 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
devl_assert_locked(priv_to_devlink(dev));
- mutex_lock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
mlx5_rescan_drivers_locked(dev);
- mutex_unlock(&mlx5_intf_mutex);
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
static int add_drivers(struct mlx5_core_dev *dev)
@@ -528,7 +543,6 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
@@ -548,85 +562,3 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev
return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
}
-
-static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
-{
- return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
- (dev->pdev->bus->number << 8) |
- PCI_SLOT(dev->pdev->devfn));
-}
-
-static int _next_phys_dev(struct mlx5_core_dev *mdev,
- const struct mlx5_core_dev *curr)
-{
- if (!mlx5_core_is_pf(mdev))
- return 0;
-
- if (mdev == curr)
- return 0;
-
- if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
- mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
- return 0;
-
- return 1;
-}
-
-static void *pci_get_other_drvdata(struct device *this, struct device *other)
-{
- if (this->driver != other->driver)
- return NULL;
-
- return pci_get_drvdata(to_pci_dev(other));
-}
-
-static int next_phys_dev_lag(struct device *dev, const void *data)
-{
- struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
- mdev = pci_get_other_drvdata(this->device, dev);
- if (!mdev)
- return 0;
-
- if (!mlx5_lag_is_supported(mdev))
- return 0;
-
- return _next_phys_dev(mdev, data);
-}
-
-static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
- int (*match)(struct device *dev, const void *data))
-{
- struct device *next;
-
- if (!mlx5_core_is_pf(dev))
- return NULL;
-
- next = bus_find_device(&pci_bus_type, NULL, dev, match);
- if (!next)
- return NULL;
-
- put_device(next);
- return pci_get_drvdata(to_pci_dev(next));
-}
-
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
-{
- lockdep_assert_held(&mlx5_intf_mutex);
- return mlx5_get_next_dev(dev, &next_phys_dev_lag);
-}
-
-void mlx5_dev_list_lock(void)
-{
- mutex_lock(&mlx5_intf_mutex);
-}
-void mlx5_dev_list_unlock(void)
-{
- mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_dev_list_trylock(void)
-{
- return mutex_trylock(&mlx5_intf_mutex);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index af8460bb257b..3e064234f6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -138,7 +138,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct pci_dev *pdev = dev->pdev;
- bool sf_dev_allocated;
int ret = 0;
if (mlx5_dev_is_lightweight(dev)) {
@@ -148,16 +147,6 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
- sf_dev_allocated = mlx5_sf_dev_allocated(dev);
- if (sf_dev_allocated) {
- /* Reload results in deleting SF device which further results in
- * unregistering devlink instance while holding devlink_mutext.
- * Hence, do not support reload.
- */
- NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
- return -EOPNOTSUPP;
- }
-
if (mlx5_lag_is_active(dev)) {
NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index ad789349c06e..76d27d2ee40c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -889,36 +889,16 @@ int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev)
return 0;
}
-static int
+static void
mlx5_devlink_fmsg_fill_trace(struct devlink_fmsg *fmsg,
struct mlx5_fw_trace_data *trace_data)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp);
- if (err)
- return err;
-
- err = devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u64_pair_put(fmsg, "timestamp", trace_data->timestamp);
+ devlink_fmsg_bool_pair_put(fmsg, "lost", trace_data->lost);
+ devlink_fmsg_u8_pair_put(fmsg, "event_id", trace_data->event_id);
+ devlink_fmsg_string_pair_put(fmsg, "msg", trace_data->msg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
@@ -927,7 +907,6 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
struct mlx5_fw_trace_data *straces = tracer->st_arr.straces;
u32 index, start_index, end_index;
u32 saved_traces_index;
- int err;
if (!straces[0].timestamp)
return -ENOMSG;
@@ -940,22 +919,18 @@ int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
start_index = 0;
end_index = (saved_traces_index - 1) & (SAVED_TRACES_NUM - 1);
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces");
- if (err)
- goto unlock;
+ devlink_fmsg_arr_pair_nest_start(fmsg, "dump fw traces");
index = start_index;
while (index != end_index) {
- err = mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]);
- if (err)
- goto unlock;
+ mlx5_devlink_fmsg_fill_trace(fmsg, &straces[index]);
index = (index + 1) & (SAVED_TRACES_NUM - 1);
}
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
-unlock:
+ devlink_fmsg_arr_pair_nest_end(fmsg);
mutex_unlock(&tracer->st_arr.lock);
- return err;
+
+ return 0;
}
static void mlx5_fw_tracer_update_db(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index e869c65d8e90..c7216e84ef8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -13,106 +13,55 @@ struct mlx5_vnic_diag_stats {
__be64 query_vnic_env_out[MLX5_ST_SZ_QW(query_vnic_env_out)];
};
-int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
- struct devlink_fmsg *fmsg,
- u16 vport_num, bool other_vport)
+void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport)
{
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
struct mlx5_vnic_diag_stats vnic;
- int err;
MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
MLX5_SET(query_vnic_env_in, in, vport_number, vport_num);
MLX5_SET(query_vnic_env_in, in, other_vport, !!other_vport);
- err = mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);
- if (err)
- return err;
+ mlx5_cmd_exec_inout(dev, query_vnic_env, in, &vnic.query_vnic_env_out);
- err = devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
+ devlink_fmsg_pair_nest_start(fmsg, "vNIC env counters");
+ devlink_fmsg_obj_nest_start(fmsg);
if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
- VNIC_ENV_GET(&vnic, total_error_queues));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
- VNIC_ENV_GET(&vnic,
- send_queue_priority_update_flow));
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
+ VNIC_ENV_GET(&vnic, total_error_queues));
+ devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
+ VNIC_ENV_GET(&vnic, send_queue_priority_update_flow));
}
-
if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
- VNIC_ENV_GET(&vnic, comp_eq_overrun));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
- VNIC_ENV_GET(&vnic, async_eq_overrun));
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
- VNIC_ENV_GET(&vnic, cq_overrun));
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, invalid_command_count)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
- VNIC_ENV_GET(&vnic, invalid_command));
- if (err)
- return err;
- }
-
- if (MLX5_CAP_GEN(dev, quota_exceeded_count)) {
- err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
- VNIC_ENV_GET(&vnic, quota_exceeded_command));
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
+ VNIC_ENV_GET(&vnic, comp_eq_overrun));
+ devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
+ VNIC_ENV_GET(&vnic, async_eq_overrun));
}
-
- if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) {
- err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
- VNIC_ENV_GET64(&vnic,
- nic_receive_steering_discard));
- if (err)
- return err;
- }
-
+ if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun))
+ devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
+ VNIC_ENV_GET(&vnic, cq_overrun));
+ if (MLX5_CAP_GEN(dev, invalid_command_count))
+ devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
+ VNIC_ENV_GET(&vnic, invalid_command));
+ if (MLX5_CAP_GEN(dev, quota_exceeded_count))
+ devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
+ VNIC_ENV_GET(&vnic, quota_exceeded_command));
+ if (MLX5_CAP_GEN(dev, nic_receive_steering_discard))
+ devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
+ VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
- err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
- VNIC_ENV_GET64(&vnic,
- generated_pkt_steering_fail));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
- VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
- if (err)
- return err;
+ devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
+ devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
}
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
@@ -121,7 +70,8 @@ static int mlx5_reporter_vnic_diagnose(struct devlink_health_reporter *reporter,
{
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
- return mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
+ mlx5_reporter_vnic_diagnose_counters(dev, fmsg, 0, false);
+ return 0;
}
static const struct devlink_health_reporter_ops mlx5_reporter_vnic_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
index eba87a39e9b1..fbc31256f7fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.h
@@ -9,8 +9,8 @@
void mlx5_reporter_vnic_create(struct mlx5_core_dev *dev);
void mlx5_reporter_vnic_destroy(struct mlx5_core_dev *dev);
-int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
- struct devlink_fmsg *fmsg,
- u16 vport_num, bool other_vport);
+void mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
+ struct devlink_fmsg *fmsg,
+ u16 vport_num, bool other_vport);
#endif /* __MLX5_REPORTER_VNIC_H */
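The reporter_vnic hunks above are representative of the whole fmsg series in this merge: the devlink fmsg helpers now return void and latch any failure inside the fmsg object, so drivers drop their per-call "if (err) return err;" ladders and only the reporter entry points keep an int return. A minimal userspace sketch of that deferred-error pattern, with illustrative names only (this is not the in-kernel devlink API):

/* Sketch of a formatted-message builder that latches its first error
 * internally, so callers can chain void put calls and check once at the end.
 * All names here are illustrative stand-ins, not the devlink fmsg API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct fmsg {
	char buf[256];
	size_t len;
	int err;	/* first failure wins; later calls become no-ops */
};

static void fmsg_put(struct fmsg *f, const char *key, unsigned int val)
{
	int n;

	if (f->err)
		return;
	n = snprintf(f->buf + f->len, sizeof(f->buf) - f->len,
		     "%s=%u ", key, val);
	if (n < 0 || (size_t)n >= sizeof(f->buf) - f->len)
		f->err = -EMSGSIZE;
	else
		f->len += n;
}

/* Diagnose callback in the "converted" style: it builds output
 * unconditionally; the latched error is surfaced once by the caller.
 */
static void diagnose(struct fmsg *f)
{
	fmsg_put(f, "cqn", 7);
	fmsg_put(f, "ci", 128);
	fmsg_put(f, "size", 1024);
}

int main(void)
{
	struct fmsg f = {};

	diagnose(&f);
	if (f.err) {
		fprintf(stderr, "fmsg failed: %d\n", f.err);
		return EXIT_FAILURE;
	}
	puts(f.buf);
	return 0;
}
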
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dpll.c b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
new file mode 100644
index 000000000000..2cd81bb32c66
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dpll.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/dpll.h>
+#include <linux/mlx5/driver.h>
+
+/* This structure represents a reference to DPLL, one is created
+ * per mdev instance.
+ */
+struct mlx5_dpll {
+ struct dpll_device *dpll;
+ struct dpll_pin *dpll_pin;
+ struct mlx5_core_dev *mdev;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+ struct {
+ bool valid;
+ enum dpll_lock_status lock_status;
+ enum dpll_pin_state pin_state;
+ } last;
+ struct notifier_block mdev_nb;
+ struct net_device *tracking_netdev;
+};
+
+static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
+{
+ u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSECQ, 0, 0);
+ if (err)
+ return err;
+ *clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity);
+ return 0;
+}
+
+static int
+mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
+ enum mlx5_msees_admin_status *admin_status,
+ enum mlx5_msees_oper_status *oper_status,
+ bool *ho_acq)
+{
+ u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSEES, 0, 0);
+ if (err)
+ return err;
+ if (admin_status)
+ *admin_status = MLX5_GET(msees_reg, out, admin_status);
+ *oper_status = MLX5_GET(msees_reg, out, oper_status);
+ if (ho_acq)
+ *ho_acq = MLX5_GET(msees_reg, out, ho_acq);
+ return 0;
+}
+
+static int
+mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
+ enum mlx5_msees_admin_status admin_status)
+{
+ u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
+
+ MLX5_SET(msees_reg, in, field_select,
+ MLX5_MSEES_FIELD_SELECT_ENABLE |
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
+ MLX5_SET(msees_reg, in, admin_status, admin_status);
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_MSEES, 0, 1);
+}
+
+static enum dpll_lock_status
+mlx5_dpll_lock_status_get(enum mlx5_msees_oper_status oper_status, bool ho_acq)
+{
+ switch (oper_status) {
+ case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
+ return ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
+ DPLL_LOCK_STATUS_LOCKED;
+ case MLX5_MSEES_OPER_STATUS_HOLDOVER:
+ fallthrough;
+ case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
+ return DPLL_LOCK_STATUS_HOLDOVER;
+ default:
+ return DPLL_LOCK_STATUS_UNLOCKED;
+ }
+}
+
+static enum dpll_pin_state
+mlx5_dpll_pin_state_get(enum mlx5_msees_admin_status admin_status,
+ enum mlx5_msees_oper_status oper_status)
+{
+ return (admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
+ (oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
+ oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
+ DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
+}
+
+static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll *mdpll = priv;
+ bool ho_acq;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, NULL,
+ &oper_status, &ho_acq);
+ if (err)
+ return err;
+
+ *status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ return 0;
+}
+
+static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
+ void *priv, enum dpll_mode *mode,
+ struct netlink_ext_ack *extack)
+{
+ *mode = DPLL_MODE_MANUAL;
+ return 0;
+}
+
+static bool mlx5_dpll_device_mode_supported(const struct dpll_device *dpll,
+ void *priv,
+ enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ return mode == DPLL_MODE_MANUAL;
+}
+
+static const struct dpll_device_ops mlx5_dpll_device_ops = {
+ .lock_status_get = mlx5_dpll_device_lock_status_get,
+ .mode_get = mlx5_dpll_device_mode_get,
+ .mode_supported = mlx5_dpll_device_mode_supported,
+};
+
+static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_INPUT;
+ return 0;
+}
+
+static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ struct mlx5_dpll *mdpll = pin_priv;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
+ &oper_status, NULL);
+ if (err)
+ return err;
+ *state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+ return 0;
+}
+
+static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
+ void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_dpll *mdpll = pin_priv;
+
+ return mlx5_dpll_synce_status_set(mdpll->mdev,
+ state == DPLL_PIN_STATE_CONNECTED ?
+ MLX5_MSEES_ADMIN_STATUS_TRACK :
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+}
+
+static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
+ .direction_get = mlx5_dpll_pin_direction_get,
+ .state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
+ .state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
+};
+
+static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
+ .type = DPLL_PIN_TYPE_SYNCE_ETH_PORT,
+ .capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
+};
+
+#define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */
+
+static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll)
+{
+ queue_delayed_work(mdpll->wq, &mdpll->work,
+ msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL));
+}
+
+static void mlx5_dpll_periodic_work(struct work_struct *work)
+{
+ struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
+ work.work);
+ enum mlx5_msees_admin_status admin_status;
+ enum mlx5_msees_oper_status oper_status;
+ enum dpll_lock_status lock_status;
+ enum dpll_pin_state pin_state;
+ bool ho_acq;
+ int err;
+
+ err = mlx5_dpll_synce_status_get(mdpll->mdev, &admin_status,
+ &oper_status, &ho_acq);
+ if (err)
+ goto err_out;
+ lock_status = mlx5_dpll_lock_status_get(oper_status, ho_acq);
+ pin_state = mlx5_dpll_pin_state_get(admin_status, oper_status);
+
+ if (!mdpll->last.valid)
+ goto invalid_out;
+
+ if (mdpll->last.lock_status != lock_status)
+ dpll_device_change_ntf(mdpll->dpll);
+ if (mdpll->last.pin_state != pin_state)
+ dpll_pin_change_ntf(mdpll->dpll_pin);
+
+invalid_out:
+ mdpll->last.lock_status = lock_status;
+ mdpll->last.pin_state = pin_state;
+ mdpll->last.valid = true;
+err_out:
+ mlx5_dpll_periodic_work_queue(mdpll);
+}
+
+static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
+ struct net_device *netdev)
+{
+ if (mdpll->tracking_netdev)
+ return;
+ netdev_dpll_pin_set(netdev, mdpll->dpll_pin);
+ mdpll->tracking_netdev = netdev;
+}
+
+static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
+{
+ if (!mdpll->tracking_netdev)
+ return;
+ netdev_dpll_pin_clear(mdpll->tracking_netdev);
+ mdpll->tracking_netdev = NULL;
+}
+
+static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb);
+ struct net_device *netdev = data;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
+ if (netdev)
+ mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev);
+ else
+ mlx5_dpll_netdev_dpll_pin_clear(mdpll);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll,
+ struct mlx5_core_dev *mdev)
+{
+ mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event;
+ mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb);
+ mlx5_core_uplink_netdev_event_replay(mdev);
+}
+
+static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll,
+ struct mlx5_core_dev *mdev)
+{
+ mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb);
+ mlx5_dpll_netdev_dpll_pin_clear(mdpll);
+}
+
+static int mlx5_dpll_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ struct mlx5_core_dev *mdev = edev->mdev;
+ struct mlx5_dpll *mdpll;
+ u64 clock_id;
+ int err;
+
+ err = mlx5_dpll_synce_status_set(mdev,
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+ if (err)
+ return err;
+
+ err = mlx5_dpll_clock_id_get(mdev, &clock_id);
+ if (err)
+ return err;
+
+ mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL);
+ if (!mdpll)
+ return -ENOMEM;
+ mdpll->mdev = mdev;
+ auxiliary_set_drvdata(adev, mdpll);
+
+ /* Multiple mdev instances might share one DPLL device. */
+ mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
+ if (IS_ERR(mdpll->dpll)) {
+ err = PTR_ERR(mdpll->dpll);
+ goto err_free_mdpll;
+ }
+
+ err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC,
+ &mlx5_dpll_device_ops, mdpll);
+ if (err)
+ goto err_put_dpll_device;
+
+ /* Multiple mdev instances might share one DPLL pin. */
+ mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
+ THIS_MODULE, &mlx5_dpll_pin_properties);
+ if (IS_ERR(mdpll->dpll_pin)) {
+ err = PTR_ERR(mdpll->dpll_pin);
+ goto err_unregister_dpll_device;
+ }
+
+ err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+ if (err)
+ goto err_put_dpll_pin;
+
+ mdpll->wq = create_singlethread_workqueue("mlx5_dpll");
+ if (!mdpll->wq) {
+ err = -ENOMEM;
+ goto err_unregister_dpll_pin;
+ }
+
+ mlx5_dpll_mdev_netdev_track(mdpll, mdev);
+
+ INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
+ mlx5_dpll_periodic_work_queue(mdpll);
+
+ return 0;
+
+err_unregister_dpll_pin:
+ dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+err_put_dpll_pin:
+ dpll_pin_put(mdpll->dpll_pin);
+err_unregister_dpll_device:
+ dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
+err_put_dpll_device:
+ dpll_device_put(mdpll->dpll);
+err_free_mdpll:
+ kfree(mdpll);
+ return err;
+}
+
+static void mlx5_dpll_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
+ struct mlx5_core_dev *mdev = mdpll->mdev;
+
+ cancel_delayed_work(&mdpll->work);
+ mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
+ destroy_workqueue(mdpll->wq);
+ dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
+ &mlx5_dpll_pins_ops, mdpll);
+ dpll_pin_put(mdpll->dpll_pin);
+ dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
+ dpll_device_put(mdpll->dpll);
+ kfree(mdpll);
+
+ mlx5_dpll_synce_status_set(mdev,
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
+}
+
+static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state)
+{
+ return 0;
+}
+
+static int mlx5_dpll_resume(struct auxiliary_device *adev)
+{
+ return 0;
+}
+
+static const struct auxiliary_device_id mlx5_dpll_id_table[] = {
+ { .name = MLX5_ADEV_NAME ".dpll", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table);
+
+static struct auxiliary_driver mlx5_dpll_driver = {
+ .name = "dpll",
+ .probe = mlx5_dpll_probe,
+ .remove = mlx5_dpll_remove,
+ .suspend = mlx5_dpll_suspend,
+ .resume = mlx5_dpll_resume,
+ .id_table = mlx5_dpll_id_table,
+};
+
+static int __init mlx5_dpll_init(void)
+{
+ return auxiliary_driver_register(&mlx5_dpll_driver);
+}
+
+static void __exit mlx5_dpll_exit(void)
+{
+ auxiliary_driver_unregister(&mlx5_dpll_driver);
+}
+
+module_init(mlx5_dpll_init);
+module_exit(mlx5_dpll_exit);
+
+MODULE_AUTHOR("Jiri Pirko <jiri@nvidia.com>");
+MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver");
+MODULE_LICENSE("Dual BSD/GPL");
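The new dpll.c above polls the SyncE status registers every 500 ms and only raises a dpll device/pin notification when the cached lock status or pin state actually changed. A stripped-down userspace sketch of that cache-and-notify-on-delta loop, with hypothetical poll_lock_status()/notify_change() helpers standing in for the hardware query and the dpll netlink notification:

/* Cache-and-notify-on-change polling, reduced to a plain userspace loop. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

enum lock_status { UNLOCKED, LOCKED, LOCKED_HO_ACQ, HOLDOVER };

static enum lock_status poll_lock_status(int iteration)
{
	/* Pretend hardware: locks after a few polls. */
	return iteration < 3 ? UNLOCKED : LOCKED;
}

static void notify_change(enum lock_status status)
{
	printf("lock status changed -> %d\n", status);
}

int main(void)
{
	struct {
		bool valid;
		enum lock_status lock_status;
	} last = { .valid = false };

	for (int i = 0; i < 6; i++) {
		enum lock_status cur = poll_lock_status(i);

		/* Notify only when a previous sample exists and it differs. */
		if (last.valid && last.lock_status != cur)
			notify_change(cur);

		last.lock_status = cur;
		last.valid = true;
		usleep(500 * 1000);	/* 500 ms, mirroring the driver's interval */
	}
	return 0;
}
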
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 86f2690c5e01..b2a5da9739d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -141,7 +141,7 @@ struct page_pool;
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
+#define MLX5E_MAX_NUM_CHANNELS 256
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
@@ -168,6 +168,13 @@ struct page_pool;
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
+enum mlx5e_devcom_events {
+ MPV_DEVCOM_MASTER_UP,
+ MPV_DEVCOM_MASTER_DOWN,
+ MPV_DEVCOM_IPSEC_MASTER_UP,
+ MPV_DEVCOM_IPSEC_MASTER_DOWN,
+};
+
static inline u8 mlx5e_get_num_lag_ports(struct mlx5_core_dev *mdev)
{
if (mlx5_lag_is_lacp_owner(mdev))
@@ -193,7 +200,8 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
{
return is_kdump_kernel() ?
MLX5E_MIN_NUM_CHANNELS :
- min_t(int, mlx5_comp_vectors_max(mdev), MLX5E_MAX_NUM_CHANNELS);
+ min3(mlx5_comp_vectors_max(mdev), (u32)MLX5E_MAX_NUM_CHANNELS,
+ (u32)(1 << MLX5_CAP_GEN(mdev, log_max_rqt_size)));
}
/* The maximum WQE size can be retrieved by max_wqe_sz_sq in
@@ -936,6 +944,7 @@ struct mlx5e_priv {
struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
struct dentry *dfs_root;
+ struct mlx5_devcom_comp_dev *devcom;
};
struct mlx5e_dev {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index c6b6e290fd79..0b1ac6e5c890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -12,11 +12,19 @@ struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
{
struct mlx5e_dev *mlx5e_dev;
struct devlink *devlink;
+ int err;
devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev),
devlink_net(priv_to_devlink(mdev)), dev);
if (!devlink)
return ERR_PTR(-ENOMEM);
+
+ err = devl_nested_devlink_set(priv_to_devlink(mdev), devlink);
+ if (err) {
+ devlink_free(devlink);
+ return ERR_PTR(err);
+ }
+
devlink_register(devlink);
return devlink_priv(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index e5a44b0b9616..4d6225e0eec7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -150,7 +150,6 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct dentry *dfs_root);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
-void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 6f4e6c34b2a2..81523825faa2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -5,134 +5,59 @@
#include "lib/eq.h"
#include "lib/mlx5.h"
-int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
+void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name)
{
- int err;
-
- err = devlink_fmsg_pair_nest_start(fmsg, name);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_pair_nest_start(fmsg, name);
+ devlink_fmsg_obj_nest_start(fmsg);
}
-int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
+void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_pair_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
-int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
{
u32 out[MLX5_ST_SZ_DW(query_cq_out)] = {};
u8 hw_status;
void *cqc;
- int err;
-
- err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
- if (err)
- return err;
+ mlx5_core_query_cq(cq->mdev, &cq->mcq, out);
cqc = MLX5_ADDR_OF(query_cq_out, out, cq_context);
hw_status = MLX5_GET(cqc, cqc, status);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq));
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+ devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);
+ devlink_fmsg_u8_pair_put(fmsg, "HW status", hw_status);
+ devlink_fmsg_u32_pair_put(fmsg, "ci", mlx5_cqwq_get_ci(&cq->wq));
+ devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&cq->wq));
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
+void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg)
{
u8 cq_log_stride;
u32 cq_sz;
- int err;
cq_sz = mlx5_cqwq_get_size(&cq->wq);
cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+ devlink_fmsg_u64_pair_put(fmsg, "stride size", BIT(cq_log_stride));
+ devlink_fmsg_u32_pair_put(fmsg, "size", cq_sz);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg)
+void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core));
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "EQ");
+ devlink_fmsg_u8_pair_put(fmsg, "eqn", eq->core.eqn);
+ devlink_fmsg_u32_pair_put(fmsg, "irqn", eq->core.irqn);
+ devlink_fmsg_u32_pair_put(fmsg, "vecidx", eq->core.vecidx);
+ devlink_fmsg_u32_pair_put(fmsg, "ci", eq->core.cons_index);
+ devlink_fmsg_u32_pair_put(fmsg, "size", eq_get_size(&eq->core));
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
void mlx5e_health_create_reporters(struct mlx5e_priv *priv)
@@ -235,23 +160,19 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
}
#define MLX5_HEALTH_DEVLINK_MAX_SIZE 1024
-static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
- const void *value, u32 value_len)
+static void mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
+ const void *value, u32 value_len)
{
u32 data_size;
- int err = 0;
u32 offset;
for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
if (data_size > MLX5_HEALTH_DEVLINK_MAX_SIZE)
data_size = MLX5_HEALTH_DEVLINK_MAX_SIZE;
- err = devlink_fmsg_binary_put(fmsg, value + offset, data_size);
- if (err)
- break;
+ devlink_fmsg_binary_put(fmsg, value + offset, data_size);
}
- return err;
}
int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
@@ -259,9 +180,8 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_rsc_dump_cmd *cmd;
+ int cmd_err, err = 0;
struct page *page;
- int cmd_err, err;
- int end_err;
int size;
if (IS_ERR_OR_NULL(mdev->rsc_dump))
@@ -271,9 +191,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
if (!page)
return -ENOMEM;
- err = devlink_fmsg_binary_pair_nest_start(fmsg, "data");
- if (err)
- goto free_page;
+ devlink_fmsg_binary_pair_nest_start(fmsg, "data");
cmd = mlx5_rsc_dump_cmd_create(mdev, key);
if (IS_ERR(cmd)) {
@@ -288,52 +206,31 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key
goto destroy_cmd;
}
- err = mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
- if (err)
- goto destroy_cmd;
-
+ mlx5e_health_rsc_fmsg_binary(fmsg, page_address(page), size);
} while (cmd_err > 0);
destroy_cmd:
mlx5_rsc_dump_cmd_destroy(cmd);
- end_err = devlink_fmsg_binary_pair_nest_end(fmsg);
- if (end_err)
- err = end_err;
+ devlink_fmsg_binary_pair_nest_end(fmsg);
free_page:
__free_page(page);
return err;
}
-int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
- int queue_idx, char *lbl)
+void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl)
{
struct mlx5_rsc_key key = {};
- int err;
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = queue_idx;
key.size = PAGE_SIZE;
key.num_of_obj1 = 1;
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
- if (err)
- return err;
-
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_start(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, lbl);
+ devlink_fmsg_u32_pair_put(fmsg, "index", queue_idx);
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
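mlx5e_health_rsc_fmsg_binary() above keeps its chunking loop but no longer propagates an error per chunk: the dump buffer is sliced into pieces of at most MLX5_HEALTH_DEVLINK_MAX_SIZE (1024 bytes) and each piece is handed to the fmsg. A self-contained sketch of that slicing loop, where emit_chunk() is a stand-in for devlink_fmsg_binary_put():

/* Slice a buffer into bounded chunks, mirroring the 1024-byte cap used
 * by the health dump; emit_chunk() stands in for the fmsg binary put.
 */
#include <stdio.h>
#include <string.h>

#define MAX_CHUNK 1024U

static void emit_chunk(const void *data, unsigned int len)
{
	printf("chunk of %u bytes\n", len);
	(void)data;
}

static void emit_binary(const void *value, unsigned int value_len)
{
	unsigned int offset, data_size;

	for (offset = 0; offset < value_len; offset += data_size) {
		data_size = value_len - offset;
		if (data_size > MAX_CHUNK)
			data_size = MAX_CHUNK;
		emit_chunk((const char *)value + offset, data_size);
	}
}

int main(void)
{
	char page[4096 + 100];

	memset(page, 0, sizeof(page));
	emit_binary(page, sizeof(page));	/* four full chunks plus a 100-byte tail */
	return 0;
}
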
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 415840c3ef84..84be3dd6f747 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -20,11 +20,11 @@ void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
void mlx5e_reporter_tx_ptpsq_unhealthy(struct mlx5e_ptpsq *ptpsq);
-int mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
-int mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
-int mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg);
-int mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
-int mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg);
+void mlx5e_health_cq_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+void mlx5e_health_cq_common_diag_fmsg(struct mlx5e_cq *cq, struct devlink_fmsg *fmsg);
+void mlx5e_health_eq_diag_fmsg(struct mlx5_eq_comp *eq, struct devlink_fmsg *fmsg);
+void mlx5e_health_fmsg_named_obj_nest_start(struct devlink_fmsg *fmsg, char *name);
+void mlx5e_health_fmsg_named_obj_nest_end(struct devlink_fmsg *fmsg);
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv);
void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv);
@@ -54,6 +54,6 @@ void mlx5e_health_destroy_reporters(struct mlx5e_priv *priv);
void mlx5e_health_channels_update(struct mlx5e_priv *priv);
int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key,
struct devlink_fmsg *fmsg);
-int mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
- int queue_idx, char *lbl);
+void mlx5e_health_queue_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
+ int queue_idx, char *lbl);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index e8eea9ffd5eb..fea8c0a5fe89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -199,78 +199,38 @@ static int mlx5e_rx_reporter_recover(struct devlink_health_reporter *reporter,
mlx5e_health_recover_channels(priv);
}
-static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
- struct devlink_fmsg *fmsg)
+static void mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
+ struct devlink_fmsg *fmsg)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "WQE size",
- mlx5_wq_cyc_get_size(&icosq->wq));
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
+ devlink_fmsg_u32_pair_put(fmsg, "sqn", icosq->sqn);
+ devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cc);
+ devlink_fmsg_u32_pair_put(fmsg, "pc", icosq->pc);
+ devlink_fmsg_u32_pair_put(fmsg, "WQE size", mlx5_wq_cyc_get_size(&icosq->wq));
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "CQ");
+ devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", icosq->cq.wq.cc);
+ devlink_fmsg_u32_pair_put(fmsg, "size", mlx5_cqwq_get_size(&icosq->cq.wq));
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
+static void mlx5e_health_rq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_rq *rq)
{
- int err;
int i;
BUILD_BUG_ON_MSG(ARRAY_SIZE(rq_sw_state_type_name) != MLX5E_NUM_RQ_STATES,
"rq_sw_state_type_name string array must be consistent with MLX5E_RQ_STATE_* enum in en.h");
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i) {
- err = devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i],
- test_bit(i, &rq->state));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(rq_sw_state_type_name); ++i)
+ devlink_fmsg_u32_pair_put(fmsg, rq_sw_state_type_name[i],
+ test_bit(i, &rq->state));
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
@@ -291,184 +251,93 @@ mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
wq_head = mlx5e_rqwq_get_head(rq);
wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
- err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
- if (err)
- return err;
-
- err = mlx5e_health_rq_put_sw_state(fmsg, rq);
- if (err)
- return err;
-
- err = mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
+ devlink_fmsg_u8_pair_put(fmsg, "HW state", hw_state);
+ devlink_fmsg_u32_pair_put(fmsg, "WQE counter", wqe_counter);
+ devlink_fmsg_u32_pair_put(fmsg, "posted WQEs", wqes_sz);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", wq_head);
+ mlx5e_health_rq_put_sw_state(fmsg, rq);
+ mlx5e_health_cq_diag_fmsg(&rq->cq, fmsg);
+ mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);
if (rq->icosq) {
struct mlx5e_icosq *icosq = rq->icosq;
u8 icosq_hw_state;
+ int err;
err = mlx5_core_query_sq_state(rq->mdev, icosq->sqn, &icosq_hw_state);
if (err)
return err;
- err = mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
- if (err)
- return err;
+ mlx5e_reporter_icosq_diagnose(icosq, icosq_hw_state, fmsg);
}
return 0;
}
-static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static void mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
- if (err)
- return err;
-
- return devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
+ mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static void mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_params *params;
u32 rq_stride, rq_sz;
bool real_time;
- int err;
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
real_time = mlx5_is_real_time_rq(priv->mdev);
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
- if (err)
- return err;
-
- err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
+ devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
+ devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
+ devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
+ devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
+ mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch,
struct devlink_fmsg *fmsg)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
+ mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
- if (err)
- return err;
- if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
- err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
- if (err)
- return err;
- }
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
+ mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
+ mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
- struct devlink_fmsg *fmsg)
+static void mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
+ struct devlink_fmsg *fmsg)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
- if (err)
- return err;
-
- err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -477,20 +346,15 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
- int i, err = 0;
+ int i;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
- if (err)
- goto unlock;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
- if (err)
- goto unlock;
+ mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -499,19 +363,14 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
&c->xskrq : &c->rq;
- err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
- if (err)
- goto unlock;
- }
- if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
- err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
- if (err)
- goto unlock;
+ mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
}
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
+ mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
unlock:
mutex_unlock(&priv->state_lock);
- return err;
+ return 0;
}
static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -519,61 +378,34 @@ static int mlx5e_rx_reporter_dump_icosq(struct mlx5e_priv *priv, struct devlink_
{
struct mlx5e_txqsq *icosq = ctx;
struct mlx5_rsc_key key = {};
- int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "ICOSQ");
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = icosq->sqn;
key.num_of_obj1 = 1;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ return 0;
}
static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -581,60 +413,34 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
{
struct mlx5_rsc_key key = {};
struct mlx5e_rq *rq = ctx;
- int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = rq->rqn;
key.num_of_obj1 = 1;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "receive_buff");
key.rsc = MLX5_SGMT_TYPE_RCV_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ return 0;
}
static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
@@ -642,44 +448,28 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
{
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
- int i, err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_RX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "RQs");
- for (i = 0; i < priv->channels.num; i++) {
+ for (int i = 0; i < priv->channels.num; i++) {
struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
- err = mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
- if (err)
- return err;
+ mlx5e_health_queue_dump(priv, fmsg, rq->rqn, "RQ");
}
- if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
- err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
- if (err)
- return err;
- }
+ if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state))
+ mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
- return devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ return 0;
}
static int mlx5e_rx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index ff8242f67c54..6b44ddce14e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -50,25 +50,19 @@ static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq->pc = 0;
}
-static int mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
+static void mlx5e_health_sq_put_sw_state(struct devlink_fmsg *fmsg, struct mlx5e_txqsq *sq)
{
- int err;
int i;
BUILD_BUG_ON_MSG(ARRAY_SIZE(sq_sw_state_type_name) != MLX5E_NUM_SQ_STATES,
"sq_sw_state_type_name string array must be consistent with MLX5E_SQ_STATE_* enum in en.h");
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SW State");
- for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i) {
- err = devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
- test_bit(i, &sq->state));
- if (err)
- return err;
- }
+ for (i = 0; i < ARRAY_SIZE(sq_sw_state_type_name); ++i)
+ devlink_fmsg_u32_pair_put(fmsg, sq_sw_state_type_name[i],
+ test_bit(i, &sq->state));
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
@@ -220,7 +214,7 @@ static int mlx5e_tx_reporter_recover(struct devlink_health_reporter *reporter,
mlx5e_health_recover_channels(priv);
}
-static int
+static void
mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
@@ -229,164 +223,71 @@ mlx5e_tx_reporter_build_diagnose_output_sq_common(struct devlink_fmsg *fmsg,
u8 state;
int err;
- err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- if (err)
- return err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
- if (err)
- return err;
-
- err = devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "tc", tc);
+ devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
+ devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
- err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
- if (err)
- return err;
-
- err = mlx5e_health_sq_put_sw_state(fmsg, sq);
- if (err)
- return err;
-
- err = mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
+ err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
+ if (!err)
+ devlink_fmsg_u8_pair_put(fmsg, "HW state", state);
+
+ devlink_fmsg_bool_pair_put(fmsg, "stopped", stopped);
+ devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
+ devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
+ mlx5e_health_sq_put_sw_state(fmsg, sq);
+ mlx5e_health_cq_diag_fmsg(&sq->cq, fmsg);
+ mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);
}
-static int
+static void
mlx5e_tx_reporter_build_diagnose_output(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *sq, int tc)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
+ mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, sq, tc);
+ devlink_fmsg_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_build_diagnose_output_ptpsq(struct devlink_fmsg *fmsg,
struct mlx5e_ptpsq *ptpsq, int tc)
{
- int err;
-
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
- if (err)
- return err;
-
- err = mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
+ mlx5e_tx_reporter_build_diagnose_output_sq_common(fmsg, &ptpsq->txqsq, tc);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
+ mlx5e_health_cq_diag_fmsg(&ptpsq->ts_cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_diagnose_generic_txqsq(struct devlink_fmsg *fmsg,
struct mlx5e_txqsq *txqsq)
{
- u32 sq_stride, sq_sz;
- bool real_time;
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
- if (err)
- return err;
-
- real_time = mlx5_is_real_time_sq(txqsq->mdev);
- sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
- sq_stride = MLX5_SEND_WQE_BB;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
- if (err)
- return err;
-
- err = devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
- if (err)
- return err;
-
- err = devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
- if (err)
- return err;
-
- err = mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ bool real_time = mlx5_is_real_time_sq(txqsq->mdev);
+ u32 sq_sz = mlx5_wq_cyc_get_size(&txqsq->wq);
+ u32 sq_stride = MLX5_SEND_WQE_BB;
+
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
+ devlink_fmsg_u64_pair_put(fmsg, "stride size", sq_stride);
+ devlink_fmsg_u32_pair_put(fmsg, "size", sq_sz);
+ devlink_fmsg_string_pair_put(fmsg, "ts_format", real_time ? "RT" : "FRC");
+ mlx5e_health_cq_common_diag_fmsg(&txqsq->cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_diagnose_generic_tx_port_ts(struct devlink_fmsg *fmsg,
struct mlx5e_ptpsq *ptpsq)
{
- int err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
- if (err)
- return err;
-
- err = mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
- if (err)
- return err;
-
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Port TS");
+ mlx5e_health_cq_common_diag_fmsg(&ptpsq->ts_cq, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
-static int
+static void
mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
@@ -394,39 +295,20 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5e_ptpsq *generic_ptpsq;
- int err;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common Config");
+ mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, generic_sq);
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto out;
generic_ptpsq = &ptp_ch->ptpsq[0];
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
- if (err)
- return err;
-
- err = mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
+ mlx5e_tx_reporter_diagnose_generic_txqsq(fmsg, &generic_ptpsq->txqsq);
+ mlx5e_tx_reporter_diagnose_generic_tx_port_ts(fmsg, generic_ptpsq);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
out:
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
@@ -436,20 +318,15 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
- int i, tc, err = 0;
+ int i, tc;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
- err = mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
- if (err)
- goto unlock;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
- if (err)
- goto unlock;
+ mlx5e_tx_reporter_diagnose_common_config(reporter, fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -457,31 +334,23 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
- if (err)
- goto unlock;
+ mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
}
}
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
- for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
- err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
- &ptp_ch->ptpsq[tc],
- tc);
- if (err)
- goto unlock;
- }
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++)
+ mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg,
+ &ptp_ch->ptpsq[tc],
+ tc);
close_sqs_nest:
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- goto unlock;
-
+ devlink_fmsg_arr_pair_nest_end(fmsg);
unlock:
mutex_unlock(&priv->state_lock);
- return err;
+ return 0;
}
static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -489,60 +358,33 @@ static int mlx5e_tx_reporter_dump_sq(struct mlx5e_priv *priv, struct devlink_fms
{
struct mlx5_rsc_key key = {};
struct mlx5e_txqsq *sq = ctx;
- int err;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SQ");
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "QPC");
key.rsc = MLX5_SGMT_TYPE_FULL_QPC;
key.index1 = sq->sqn;
key.num_of_obj1 = 1;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "send_buff");
key.rsc = MLX5_SGMT_TYPE_SND_BUFF;
key.num_of_obj2 = MLX5_RSC_DUMP_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ return 0;
}
static int mlx5e_tx_reporter_timeout_dump(struct mlx5e_priv *priv, struct devlink_fmsg *fmsg,
@@ -567,28 +409,17 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
{
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
- int i, tc, err;
+ int i, tc;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
- err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
- if (err)
- return err;
-
+ mlx5e_health_fmsg_named_obj_nest_start(fmsg, "SX Slice");
key.size = PAGE_SIZE;
key.rsc = MLX5_SGMT_TYPE_SX_SLICE_ALL;
- err = mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
- if (err)
- return err;
-
- err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
- if (err)
- return err;
+ mlx5e_health_rsc_fmsg_dump(priv, &key, fmsg);
+ mlx5e_health_fmsg_named_obj_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "SQs");
for (i = 0; i < priv->channels.num; i++) {
struct mlx5e_channel *c = priv->channels.c[i];
@@ -596,9 +427,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &c->sq[tc];
- err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
- if (err)
- return err;
+ mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ");
}
}
@@ -606,13 +435,12 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
- err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
- if (err)
- return err;
+ mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ");
}
}
- return devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ return 0;
}
static int mlx5e_tx_reporter_dump_from_ctx(struct mlx5e_priv *priv,
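The tx reporter hunks above drop all per-call error checking because the devlink fmsg helpers used here were reworked to return void; the builder is expected to remember any failure itself so callbacks can simply chain calls and return 0. Below is a minimal, self-contained user-space sketch of that "latch the first error internally" pattern. The struct and function names are invented for illustration only and are not the devlink API.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a formatted-message builder that, like the
 * reworked fmsg helpers, remembers only the first error so that callers
 * can chain calls without checking each return value.
 */
struct msg_builder {
	int err;	/* first error seen, 0 if none */
	size_t len;
};

static void msg_put_u32(struct msg_builder *m, const char *name, unsigned int val)
{
	if (m->err)
		return;		/* an error is already latched, later calls are no-ops */
	if (m->len > 4096) {	/* pretend the buffer overflowed */
		m->err = -ENOMEM;
		return;
	}
	m->len += printf("%s: %u\n", name, val);
}

static int msg_finish(const struct msg_builder *m)
{
	return m->err;		/* single point where the error is reported */
}

int main(void)
{
	struct msg_builder m = { 0 };

	/* no per-call error handling needed, mirroring the diagnose callbacks */
	msg_put_u32(&m, "sqn", 7);
	msg_put_u32(&m, "cc", 123);
	return msg_finish(&m) ? 1 : 0;
}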
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
index b915fb29dd2c..7b8ff7a71003 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c
@@ -9,7 +9,7 @@ void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
{
unsigned int i;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
+ for (i = 0; i < indir->actual_table_size; i++)
indir->table[i] = i % num_channels;
}
@@ -45,9 +45,9 @@ static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
}
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- bool indir_enabled, u32 init_rqn)
+ bool indir_enabled, u32 init_rqn, u32 indir_table_size)
{
- u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1;
+ u16 max_size = indir_enabled ? indir_table_size : 1;
return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1);
}
@@ -68,11 +68,11 @@ static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns
{
unsigned int i;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+ for (i = 0; i < indir->actual_table_size; i++) {
unsigned int ix = i;
if (hfunc == ETH_RSS_HASH_XOR)
- ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE));
+ ix = mlx5e_bits_invert(ix, ilog2(indir->actual_table_size));
ix = indir->table[ix];
@@ -94,7 +94,7 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
u32 *rss_rqns;
int err;
- rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
@@ -102,13 +102,25 @@ int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
if (err)
goto out;
- err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+ err = mlx5e_rqt_init(rqt, mdev, indir->max_table_size, rss_rqns,
+ indir->actual_table_size);
out:
kvfree(rss_rqns);
return err;
}
+#define MLX5E_UNIFORM_SPREAD_RQT_FACTOR 2
+
+u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels)
+{
+ u32 rqt_size = max_t(u32, MLX5E_INDIR_MIN_RQT_SIZE,
+ roundup_pow_of_two(num_channels * MLX5E_UNIFORM_SPREAD_RQT_FACTOR));
+ u32 max_cap_rqt_size = 1 << MLX5_CAP_GEN(mdev, log_max_rqt_size);
+
+ return min_t(u32, rqt_size, max_cap_rqt_size);
+}
+
void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt)
{
mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn);
@@ -151,10 +163,10 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
u32 *rss_rqns;
int err;
- if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE))
+ if (WARN_ON(rqt->size != indir->max_table_size))
return -EINVAL;
- rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL);
+ rss_rqns = kvmalloc_array(indir->actual_table_size, sizeof(*rss_rqns), GFP_KERNEL);
if (!rss_rqns)
return -ENOMEM;
@@ -162,7 +174,7 @@ int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_
if (err)
goto out;
- err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE);
+ err = mlx5e_rqt_redirect(rqt, rss_rqns, indir->actual_table_size);
out:
kvfree(rss_rqns);
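The new mlx5e_rqt_size() above sizes the indirection table from the channel count instead of a fixed 256 entries: twice the channel count, rounded up to a power of two, floored at MLX5E_INDIR_MIN_RQT_SIZE and capped by the device's log_max_rqt_size capability. A standalone arithmetic sketch of that computation follows; the capability value passed in is a made-up example, not read from real hardware, and roundup_pow_of_two() is a local reimplementation of the kernel helper.

#include <stdint.h>
#include <stdio.h>

#define MLX5E_INDIR_MIN_RQT_SIZE	(1u << 8)	/* 256, as in rqt.h */
#define UNIFORM_SPREAD_RQT_FACTOR	2		/* as in rqt.c */

static uint32_t roundup_pow_of_two(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Mirrors mlx5e_rqt_size(); log_max_rqt_size stands in for the firmware
 * capability MLX5_CAP_GEN(mdev, log_max_rqt_size).
 */
static uint32_t rqt_size(unsigned int num_channels, unsigned int log_max_rqt_size)
{
	uint32_t want = roundup_pow_of_two(num_channels * UNIFORM_SPREAD_RQT_FACTOR);
	uint32_t size = want > MLX5E_INDIR_MIN_RQT_SIZE ? want : MLX5E_INDIR_MIN_RQT_SIZE;
	uint32_t cap = 1u << log_max_rqt_size;

	return size < cap ? size : cap;
}

int main(void)
{
	/* 8 channels hit the 256-entry floor, 200 channels need 512,
	 * 4096 channels are capped at 2^11 = 2048 entries here.
	 */
	printf("%u %u %u\n", rqt_size(8, 11), rqt_size(200, 11), rqt_size(4096, 11));
	return 0;
}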
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
index 60c985a12f24..77fba3ebd18d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h
@@ -6,12 +6,14 @@
#include <linux/kernel.h>
-#define MLX5E_INDIR_RQT_SIZE (1 << 8)
+#define MLX5E_INDIR_MIN_RQT_SIZE (BIT(8))
struct mlx5_core_dev;
struct mlx5e_rss_params_indir {
- u32 table[MLX5E_INDIR_RQT_SIZE];
+ u32 *table;
+ u32 actual_table_size;
+ u32 max_table_size;
};
void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir,
@@ -24,7 +26,7 @@ struct mlx5e_rqt {
};
int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
- bool indir_enabled, u32 init_rqn);
+ bool indir_enabled, u32 init_rqn, u32 indir_table_size);
int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev,
u32 *rqns, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
@@ -35,6 +37,7 @@ static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt)
return rqt->rqtn;
}
+u32 mlx5e_rqt_size(struct mlx5_core_dev *mdev, unsigned int num_channels);
int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn);
int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns,
u8 hfunc, struct mlx5e_rss_params_indir *indir);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 7f93426b88b3..c1545a2e8d6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -81,14 +81,75 @@ struct mlx5e_rss {
refcount_t refcnt;
};
-struct mlx5e_rss *mlx5e_rss_alloc(void)
+void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
{
- return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL);
+ rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
-void mlx5e_rss_free(struct mlx5e_rss *rss)
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+ u32 actual_table_size, u32 max_table_size)
{
+ indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
+ if (!indir->table)
+ return -ENOMEM;
+
+ indir->max_table_size = max_table_size;
+ indir->actual_table_size = actual_table_size;
+
+ return 0;
+}
+
+void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir)
+{
+ kvfree(indir->table);
+}
+
+static int mlx5e_rss_copy(struct mlx5e_rss *to, const struct mlx5e_rss *from)
+{
+ u32 *dst_indir_table;
+
+ if (to->indir.actual_table_size != from->indir.actual_table_size ||
+ to->indir.max_table_size != from->indir.max_table_size) {
+ mlx5e_rss_warn(to->mdev,
+ "Failed to copy RSS due to size mismatch, src (actual %u, max %u) != dst (actual %u, max %u)\n",
+ from->indir.actual_table_size, from->indir.max_table_size,
+ to->indir.actual_table_size, to->indir.max_table_size);
+ return -EINVAL;
+ }
+
+ dst_indir_table = to->indir.table;
+ *to = *from;
+ to->indir.table = dst_indir_table;
+ memcpy(to->indir.table, from->indir.table,
+ from->indir.actual_table_size * sizeof(*from->indir.table));
+ return 0;
+}
+
+static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
+{
+ struct mlx5e_rss *rss;
+ int err;
+
+ rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
+ if (!rss)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
+ from->indir.max_table_size);
+ if (err)
+ goto err_free_rss;
+
+ err = mlx5e_rss_copy(rss, from);
+ if (err)
+ goto err_free_indir;
+
+ return rss;
+
+err_free_indir:
+ mlx5e_rss_params_indir_cleanup(&rss->indir);
+err_free_rss:
kvfree(rss);
+ return ERR_PTR(err);
}
static void mlx5e_rss_params_init(struct mlx5e_rss *rss)
@@ -282,28 +343,43 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
return retval;
}
-int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn)
+static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
{
- rss->mdev = mdev;
- rss->inner_ft_support = inner_ft_support;
- rss->drop_rqn = drop_rqn;
-
mlx5e_rss_params_init(rss);
refcount_set(&rss->refcnt, 1);
- return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn);
+ return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
+ rss->drop_rqn, rss->indir.max_table_size);
}
-int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param)
+struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ enum mlx5e_rss_init_type type, unsigned int nch,
+ unsigned int max_nch)
{
+ struct mlx5e_rss *rss;
int err;
- err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn);
+ rss = kvzalloc(sizeof(*rss), GFP_KERNEL);
+ if (!rss)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
+ mlx5e_rqt_size(mdev, nch),
+ mlx5e_rqt_size(mdev, max_nch));
+ if (err)
+ goto err_free_rss;
+
+ rss->mdev = mdev;
+ rss->inner_ft_support = inner_ft_support;
+ rss->drop_rqn = drop_rqn;
+
+ err = mlx5e_rss_init_no_tirs(rss);
if (err)
- goto err_out;
+ goto err_free_indir;
+
+ if (type == MLX5E_RSS_INIT_NO_TIRS)
+ goto out;
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
if (err)
@@ -315,14 +391,18 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
goto err_destroy_tirs;
}
- return 0;
+out:
+ return rss;
err_destroy_tirs:
mlx5e_rss_destroy_tirs(rss, false);
err_destroy_rqt:
mlx5e_rqt_destroy(&rss->rqt);
-err_out:
- return err;
+err_free_indir:
+ mlx5e_rss_params_indir_cleanup(&rss->indir);
+err_free_rss:
+ kvfree(rss);
+ return ERR_PTR(err);
}
int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
@@ -336,6 +416,8 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
+ mlx5e_rss_params_indir_cleanup(&rss->indir);
+ kvfree(rss);
return 0;
}
@@ -470,11 +552,9 @@ inner_tir:
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc)
{
- unsigned int i;
-
if (indir)
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- indir[i] = rss->indir.table[i];
+ memcpy(indir, rss->indir.table,
+ rss->indir.actual_table_size * sizeof(*rss->indir.table));
if (key)
memcpy(key, rss->hash.toeplitz_hash_key,
@@ -495,11 +575,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
struct mlx5e_rss *old_rss;
int err = 0;
- old_rss = mlx5e_rss_alloc();
- if (!old_rss)
- return -ENOMEM;
-
- *old_rss = *rss;
+ old_rss = mlx5e_rss_init_copy(rss);
+ if (IS_ERR(old_rss))
+ return PTR_ERR(old_rss);
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
@@ -523,18 +601,16 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
}
if (indir) {
- unsigned int i;
-
changed_indir = true;
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- rss->indir.table[i] = indir[i];
+ memcpy(rss->indir.table, indir,
+ rss->indir.actual_table_size * sizeof(*rss->indir.table));
}
if (changed_indir && rss->enabled) {
err = mlx5e_rss_apply(rss, rqns, num_rqns);
if (err) {
- *rss = *old_rss;
+ mlx5e_rss_copy(rss, old_rss);
goto out;
}
}
@@ -543,7 +619,9 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
mlx5e_rss_update_tirs(rss);
out:
- mlx5e_rss_free(old_rss);
+ mlx5e_rss_params_indir_cleanup(&old_rss->indir);
+ kvfree(old_rss);
+
return err;
}
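The rss.c rework replaces the fixed-size indirection array with a kvmalloc'ed table that tracks both an actual and a maximum size, and mlx5e_rss_copy() must keep the destination's own table pointer alive when it copies the rest of the struct by value. A user-space analogue of that "struct copy that preserves the owned pointer" step is sketched below; the names are illustrative, not the driver's.

#include <stdlib.h>
#include <string.h>

struct indir_params {
	unsigned int *table;		/* owned allocation, max_size entries */
	unsigned int actual_size;
	unsigned int max_size;
	unsigned int hfunc;		/* example of other by-value state */
};

static int indir_init(struct indir_params *p, unsigned int actual, unsigned int max)
{
	p->table = calloc(max, sizeof(*p->table));
	if (!p->table)
		return -1;
	p->actual_size = actual;
	p->max_size = max;
	return 0;
}

static void indir_cleanup(struct indir_params *p)
{
	free(p->table);
}

/* Like mlx5e_rss_copy(): copy everything by value, but do not let the
 * source's table pointer overwrite (and leak) the destination's buffer.
 */
static int indir_copy(struct indir_params *to, const struct indir_params *from)
{
	unsigned int *dst_table = to->table;

	if (to->actual_size != from->actual_size || to->max_size != from->max_size)
		return -1;

	*to = *from;
	to->table = dst_table;
	memcpy(to->table, from->table, from->actual_size * sizeof(*from->table));
	return 0;
}

int main(void)
{
	struct indir_params a, b;

	if (indir_init(&a, 4, 8) || indir_init(&b, 4, 8))
		return 1;
	a.table[0] = 3;
	a.hfunc = 1;
	indir_copy(&b, &a);		/* b keeps its own buffer, contents match a */
	indir_cleanup(&a);
	indir_cleanup(&b);
	return 0;
}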
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index c6b216416344..d1d0bc350e92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -8,18 +8,24 @@
#include "tir.h"
#include "fs.h"
+enum mlx5e_rss_init_type {
+ MLX5E_RSS_INIT_NO_TIRS = 0,
+ MLX5E_RSS_INIT_TIRS
+};
+
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
-struct mlx5e_rss *mlx5e_rss_alloc(void);
-void mlx5e_rss_free(struct mlx5e_rss *rss);
-int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_packet_merge_param *init_pkt_merge_param);
-int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
- bool inner_ft_support, u32 drop_rqn);
+int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
+ u32 actual_table_size, u32 max_table_size);
+void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
+void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
+struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ enum mlx5e_rss_init_type type, unsigned int nch,
+ unsigned int max_nch);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 56e6b8c7501f..b23e224e3763 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -18,7 +18,7 @@ struct mlx5e_rx_res {
struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
bool rss_active;
- u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
+ u32 *rss_rqns;
unsigned int rss_nch;
struct {
@@ -34,41 +34,42 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
+void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) {
+ if (res->rss[i])
+ mlx5e_rss_params_indir_modify_actual_size(res->rss[i], nch);
+ }
+}
+
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss *rss;
- int err;
if (WARN_ON(res->rss[0]))
return -EINVAL;
- rss = mlx5e_rss_alloc();
- if (!rss)
- return -ENOMEM;
-
- err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- &res->pkt_merge_param);
- if (err)
- goto err_rss_free;
+ rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
+ &res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
+ if (IS_ERR(rss))
+ return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
res->rss[0] = rss;
return 0;
-
-err_rss_free:
- mlx5e_rss_free(rss);
- return err;
}
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss *rss;
- int err, i;
+ int i;
for (i = 1; i < MLX5E_MAX_NUM_RSS; i++)
if (!res->rss[i])
@@ -77,13 +78,11 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
if (i == MLX5E_MAX_NUM_RSS)
return -ENOSPC;
- rss = mlx5e_rss_alloc();
- if (!rss)
- return -ENOMEM;
-
- err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn);
- if (err)
- goto err_rss_free;
+ rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
+ &res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
+ res->max_nch);
+ if (IS_ERR(rss))
+ return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
if (res->rss_active)
@@ -93,10 +92,6 @@ int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int i
*rss_idx = i;
return 0;
-
-err_rss_free:
- mlx5e_rss_free(rss);
- return err;
}
static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
@@ -108,7 +103,6 @@ static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx)
if (err)
return err;
- mlx5e_rss_free(rss);
res->rss[rss_idx] = NULL;
return 0;
@@ -284,9 +278,27 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx)
/* End of API rx_res_rss_* */
-struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
+static void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
{
- return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL);
+ kvfree(res->rss_rqns);
+ kvfree(res);
+}
+
+static struct mlx5e_rx_res *mlx5e_rx_res_alloc(struct mlx5_core_dev *mdev, unsigned int max_nch)
+{
+ struct mlx5e_rx_res *rx_res;
+
+ rx_res = kvzalloc(sizeof(*rx_res), GFP_KERNEL);
+ if (!rx_res)
+ return NULL;
+
+ rx_res->rss_rqns = kvcalloc(max_nch, sizeof(*rx_res->rss_rqns), GFP_KERNEL);
+ if (!rx_res->rss_rqns) {
+ kvfree(rx_res);
+ return NULL;
+ }
+
+ return rx_res;
}
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
@@ -308,7 +320,8 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res)
for (ix = 0; ix < res->max_nch; ix++) {
err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt,
- res->mdev, false, res->drop_rqn);
+ res->mdev, false, res->drop_rqn,
+ mlx5e_rqt_size(res->mdev, res->max_nch));
if (err) {
mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n",
err, ix);
@@ -362,7 +375,8 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
if (!builder)
return -ENOMEM;
- err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn);
+ err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn,
+ mlx5e_rqt_size(res->mdev, res->max_nch));
if (err)
goto out;
@@ -404,13 +418,19 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
mlx5e_rqt_destroy(&res->ptp.rqt);
}
-int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
- enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- unsigned int init_nch)
+struct mlx5e_rx_res *
+mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
+ unsigned int max_nch, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ unsigned int init_nch)
{
+ struct mlx5e_rx_res *res;
int err;
+ res = mlx5e_rx_res_alloc(mdev, max_nch);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
res->mdev = mdev;
res->features = features;
res->max_nch = max_nch;
@@ -421,7 +441,7 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
err = mlx5e_rx_res_rss_init_def(res, init_nch);
if (err)
- goto err_out;
+ goto err_rx_res_free;
err = mlx5e_rx_res_channels_init(res);
if (err)
@@ -431,14 +451,15 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
if (err)
goto err_channels_destroy;
- return 0;
+ return res;
err_channels_destroy:
mlx5e_rx_res_channels_destroy(res);
err_rss_destroy:
__mlx5e_rx_res_rss_destroy(res, 0);
-err_out:
- return err;
+err_rx_res_free:
+ mlx5e_rx_res_free(res);
+ return ERR_PTR(err);
}
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
@@ -446,11 +467,7 @@ void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res)
mlx5e_rx_res_ptp_destroy(res);
mlx5e_rx_res_channels_destroy(res);
mlx5e_rx_res_rss_destroy_all(res);
-}
-
-void mlx5e_rx_res_free(struct mlx5e_rx_res *res)
-{
- kvfree(res);
+ mlx5e_rx_res_free(res);
}
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 580fe8bc3cd2..82aaba8a82b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -21,13 +21,12 @@ enum mlx5e_rx_res_features {
};
/* Setup */
-struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
-int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
- enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
- unsigned int init_nch);
+struct mlx5e_rx_res *
+mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
+ unsigned int max_nch, u32 drop_rqn,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
+ unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
-void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
/* TIRN getters for flow steering */
u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix);
@@ -60,6 +59,7 @@ int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res);
int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss);
struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
+void mlx5e_rx_res_rss_update_num_channels(struct mlx5e_rx_res *res, u32 nch);
/* Workaround for hairpin */
struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 8bed17d8fe56..7decc81ed33a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -893,7 +893,7 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
mlx5e_xmit_xdp_doorbell(xdpsq);
if (test_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags)) {
- xdp_do_flush_map();
+ xdp_do_flush();
__clear_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index 7d4ceb9b9c16..655496598c68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -56,7 +56,7 @@ static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
}
-static void mlx5e_ipsec_handle_tx_limit(struct work_struct *_work)
+static void mlx5e_ipsec_handle_sw_limits(struct work_struct *_work)
{
struct mlx5e_ipsec_dwork *dwork =
container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
@@ -486,9 +486,15 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
return -EINVAL;
}
- if (x->lft.hard_byte_limit != XFRM_INF ||
- x->lft.soft_byte_limit != XFRM_INF) {
- NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
+ if (x->lft.soft_byte_limit >= x->lft.hard_byte_limit &&
+ x->lft.hard_byte_limit != XFRM_INF) {
+ /* XFRM stack doesn't prevent such configuration :(. */
+ NL_SET_ERR_MSG_MOD(extack, "Hard byte limit must be greater than soft one");
+ return -EINVAL;
+ }
+
+ if (!x->lft.soft_byte_limit || !x->lft.hard_byte_limit) {
+ NL_SET_ERR_MSG_MOD(extack, "Soft/hard byte limits can't be 0");
return -EINVAL;
}
@@ -624,11 +630,10 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return 0;
- if (x->xso.dir != XFRM_DEV_OFFLOAD_OUT)
- return 0;
-
if (x->lft.soft_packet_limit == XFRM_INF &&
- x->lft.hard_packet_limit == XFRM_INF)
+ x->lft.hard_packet_limit == XFRM_INF &&
+ x->lft.soft_byte_limit == XFRM_INF &&
+ x->lft.hard_byte_limit == XFRM_INF)
return 0;
dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
@@ -636,7 +641,7 @@ static int mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry)
return -ENOMEM;
dwork->sa_entry = sa_entry;
- INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_tx_limit);
+ INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_sw_limits);
sa_entry->dwork = dwork;
return 0;
}
@@ -850,6 +855,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
ipsec->mdev = priv->mdev;
+ init_completion(&ipsec->comp);
ipsec->wq = alloc_workqueue("mlx5e_ipsec: %s", WQ_UNBOUND, 0,
priv->netdev->name);
if (!ipsec->wq)
@@ -870,7 +876,7 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
}
ipsec->is_uplink_rep = mlx5e_is_uplink_rep(priv);
- ret = mlx5e_accel_ipsec_fs_init(ipsec);
+ ret = mlx5e_accel_ipsec_fs_init(ipsec, &priv->devcom);
if (ret)
goto err_fs_init;
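mlx5e_xfrm_validate_state() above now accepts byte limits but rejects two configurations the XFRM stack itself does not catch: a soft byte limit that is not below a finite hard limit, and a zero soft or hard limit. A standalone sketch of just that check, with XFRM_INF standing for "no limit" as in the XFRM uapi:

#include <stdio.h>

#define XFRM_INF	(~0ULL)		/* "unlimited", as in the XFRM uapi */

/* Returns 0 if the soft/hard byte limits are offloadable, -1 otherwise,
 * mirroring the two new checks in mlx5e_xfrm_validate_state().
 */
static int validate_byte_limits(unsigned long long soft, unsigned long long hard)
{
	if (soft >= hard && hard != XFRM_INF)
		return -1;	/* hard limit must be greater than the soft one */
	if (!soft || !hard)
		return -1;	/* neither limit may be zero */
	return 0;
}

int main(void)
{
	printf("%d\n", validate_byte_limits(1000, 2000));		/*  0: ok */
	printf("%d\n", validate_byte_limits(2000, 2000));		/* -1: soft >= hard */
	printf("%d\n", validate_byte_limits(XFRM_INF, XFRM_INF));	/*  0: unlimited */
	printf("%d\n", validate_byte_limits(0, 100));			/* -1: zero limit */
	return 0;
}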
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 9e7c42c2f77b..8f4a37bceaf4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -38,6 +38,7 @@
#include <net/xfrm.h>
#include <linux/idr.h>
#include "lib/aso.h"
+#include "lib/devcom.h"
#define MLX5E_IPSEC_SADB_RX_BITS 10
#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
@@ -221,12 +222,20 @@ struct mlx5e_ipsec_tx_create_attr {
enum mlx5_flow_namespace_type chains_ns;
};
+struct mlx5e_ipsec_mpv_work {
+ int event;
+ struct work_struct work;
+ struct mlx5e_priv *slave_priv;
+ struct mlx5e_priv *master_priv;
+};
+
struct mlx5e_ipsec {
struct mlx5_core_dev *mdev;
struct xarray sadb;
struct mlx5e_ipsec_sw_stats sw_stats;
struct mlx5e_ipsec_hw_stats hw_stats;
struct workqueue_struct *wq;
+ struct completion comp;
struct mlx5e_flow_steering *fs;
struct mlx5e_ipsec_rx *rx_ipv4;
struct mlx5e_ipsec_rx *rx_ipv6;
@@ -238,6 +247,7 @@ struct mlx5e_ipsec {
struct notifier_block netevent_nb;
struct mlx5_ipsec_fs *roce;
u8 is_uplink_rep: 1;
+ struct mlx5e_ipsec_mpv_work mpv_work;
};
struct mlx5e_ipsec_esn_state {
@@ -302,7 +312,7 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec, struct mlx5_devcom_comp_dev **devcom);
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
@@ -328,6 +338,10 @@ void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs);
+void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+ struct mlx5e_priv *master_priv);
+void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event);
+
static inline struct mlx5_core_dev *
mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
@@ -363,6 +377,15 @@ static inline u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return 0;
}
+
+static inline void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+ struct mlx5e_priv *master_priv)
+{
+}
+
+static inline void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
+{
+}
#endif
#endif /* __MLX5E_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 7dba4221993f..f41c976dc33f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -229,6 +229,83 @@ out:
return err;
}
+static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+ struct mlx5_flow_destination old_dest, new_dest;
+
+ old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
+ family2tt(family));
+
+ mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
+ MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);
+
+ new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
+ new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+ mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+}
+
+static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
+{
+ struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
+ struct mlx5_flow_destination old_dest, new_dest;
+
+ old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
+ old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
+ family2tt(family));
+ mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
+ mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
+
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
+}
+
+static void ipsec_mpv_work_handler(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
+ struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;
+
+ switch (work->event) {
+ case MPV_DEVCOM_IPSEC_MASTER_UP:
+ mutex_lock(&ipsec->tx->ft.mutex);
+ if (ipsec->tx->ft.refcnt)
+ mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
+ true);
+ mutex_unlock(&ipsec->tx->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv4->ft.mutex);
+ if (ipsec->rx_ipv4->ft.refcnt)
+ handle_ipsec_rx_bringup(ipsec, AF_INET);
+ mutex_unlock(&ipsec->rx_ipv4->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv6->ft.mutex);
+ if (ipsec->rx_ipv6->ft.refcnt)
+ handle_ipsec_rx_bringup(ipsec, AF_INET6);
+ mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
+ break;
+ case MPV_DEVCOM_IPSEC_MASTER_DOWN:
+ mutex_lock(&ipsec->tx->ft.mutex);
+ if (ipsec->tx->ft.refcnt)
+ mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
+ mutex_unlock(&ipsec->tx->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv4->ft.mutex);
+ if (ipsec->rx_ipv4->ft.refcnt)
+ handle_ipsec_rx_cleanup(ipsec, AF_INET);
+ mutex_unlock(&ipsec->rx_ipv4->ft.mutex);
+
+ mutex_lock(&ipsec->rx_ipv6->ft.mutex);
+ if (ipsec->rx_ipv6->ft.refcnt)
+ handle_ipsec_rx_cleanup(ipsec, AF_INET6);
+ mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
+ break;
+ }
+
+ complete(&work->master_priv->ipsec->comp);
+}
+
static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
@@ -264,7 +341,7 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
}
mlx5_destroy_flow_table(rx->ft.status);
- mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
}
static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
@@ -422,7 +499,7 @@ err_fs_ft:
err_add:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
- mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
return err;
}
@@ -562,7 +639,7 @@ err_rule:
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
struct mlx5_ipsec_fs *roce)
{
- mlx5_ipsec_fs_roce_tx_destroy(roce);
+ mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
if (tx->chains) {
ipsec_chains_destroy(tx->chains);
} else {
@@ -665,7 +742,7 @@ static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
}
connect_roce:
- err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
+ err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
if (err)
goto err_roce;
return 0;
@@ -1249,15 +1326,17 @@ static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_no_frags(spec);
setup_fte_upper_proto_match(spec, &attrs->upspec);
- if (rx != ipsec->rx_esw)
- err = setup_modify_header(ipsec, attrs->type,
- sa_entry->ipsec_obj_id | BIT(31),
- XFRM_DEV_OFFLOAD_IN, &flow_act);
- else
- err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
+ if (!attrs->drop) {
+ if (rx != ipsec->rx_esw)
+ err = setup_modify_header(ipsec, attrs->type,
+ sa_entry->ipsec_obj_id | BIT(31),
+ XFRM_DEV_OFFLOAD_IN, &flow_act);
+ else
+ err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);
- if (err)
- goto err_mod_header;
+ if (err)
+ goto err_mod_header;
+ }
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_PACKET:
@@ -1307,7 +1386,8 @@ err_add_cnt:
if (flow_act.pkt_reformat)
mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
- mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+ if (flow_act.modify_hdr)
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
kvfree(spec);
err_alloc:
@@ -1805,7 +1885,8 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
return;
}
- mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+ if (ipsec_rule->modify_hdr)
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}
@@ -1888,7 +1969,8 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
}
}
-int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
+ struct mlx5_devcom_comp_dev **devcom)
{
struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_namespace *ns, *ns_esw;
@@ -1940,7 +2022,9 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
ipsec->tx_esw->ns = ns_esw;
xa_init_flags(&ipsec->rx_esw->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
- ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+ ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
+ } else {
+ mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
}
return 0;
@@ -1987,3 +2071,33 @@ bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
return rx->allow_tunnel_mode;
}
+
+void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
+ struct mlx5e_priv *master_priv)
+{
+ struct mlx5e_ipsec_mpv_work *work;
+
+ reinit_completion(&master_priv->ipsec->comp);
+
+ if (!slave_priv->ipsec) {
+ complete(&master_priv->ipsec->comp);
+ return;
+ }
+
+ work = &slave_priv->ipsec->mpv_work;
+
+ INIT_WORK(&work->work, ipsec_mpv_work_handler);
+ work->event = event;
+ work->slave_priv = slave_priv;
+ work->master_priv = master_priv;
+ queue_work(slave_priv->ipsec->wq, &work->work);
+}
+
+void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
+{
+ if (!priv->ipsec)
+ return; /* IPsec not supported */
+
+ mlx5_devcom_send_event(priv->devcom, event, event, priv);
+ wait_for_completion(&priv->ipsec->comp);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 3245d1c9d539..a91f772dc981 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -5,6 +5,7 @@
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
+#include "lib/ipsec_fs_roce.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -63,7 +64,7 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
caps |= MLX5_IPSEC_CAP_ESPINUDP;
}
- if (mlx5_get_roce_state(mdev) &&
+ if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
caps |= MLX5_IPSEC_CAP_ROCE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9ee014a8ad24..2ed99772f168 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -54,7 +54,6 @@ struct mlx5e_accel_tx_ipsec_state {
#ifdef CONFIG_MLX5_EN_IPSEC
-void mlx5e_ipsec_inverse_table_init(void);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index dff02434ff45..215261a69255 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1247,7 +1247,7 @@ static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
{
- return MLX5E_INDIR_RQT_SIZE;
+ return mlx5e_rqt_size(priv->mdev, priv->channels.params.num_channels);
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 934b0d5ce1b3..777d311d44ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1283,9 +1283,7 @@ static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
&ttc_params);
- if (IS_ERR(fs->inner_ttc))
- return PTR_ERR(fs->inner_ttc);
- return 0;
+ return PTR_ERR_OR_ZERO(fs->inner_ttc);
}
int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
@@ -1295,9 +1293,7 @@ int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
- if (IS_ERR(fs->ttc))
- return PTR_ERR(fs->ttc);
- return 0;
+ return PTR_ERR_OR_ZERO(fs->ttc);
}
int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
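The two TTC-table constructors above collapse the "IS_ERR then PTR_ERR, else 0" tail into PTR_ERR_OR_ZERO(). The same encoding also backs the new mlx5e_rx_res_create()/mlx5e_rss_init() interfaces earlier in this series, which hand back an ERR_PTR-encoded pointer instead of a separate int status. Below is a user-space re-implementation of the encoding, purely to show the trick; the real definitions live in include/linux/err.h and thing_create() is a hypothetical constructor, not a driver function.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* Errors are encoded as pointers into the top 4095 bytes of the address
 * space, a range no valid allocation can occupy.
 */
static inline void *ERR_PTR(long error)		{ return (void *)error; }
static inline long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0;
}

/* hypothetical constructor in the new style: valid pointer or ERR_PTR, never NULL */
static void *thing_create(int fail)
{
	static int thing;

	return fail ? ERR_PTR(-ENOMEM) : &thing;
}

int main(void)
{
	printf("%d\n", PTR_ERR_OR_ZERO(thing_create(0)));	/*   0 */
	printf("%d\n", PTR_ERR_OR_ZERO(thing_create(1)));	/* -12 (ENOMEM) */
	return 0;
}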
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index acb40770cf0c..ea58c6917433 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -69,6 +69,7 @@
#include "en/htb.h"
#include "qos.h"
#include "en/trap.h"
+#include "lib/devcom.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
@@ -178,6 +179,61 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
+static int mlx5e_devcom_event_mpv(int event, void *my_data, void *event_data)
+{
+ struct mlx5e_priv *slave_priv = my_data;
+
+ switch (event) {
+ case MPV_DEVCOM_MASTER_UP:
+ mlx5_devcom_comp_set_ready(slave_priv->devcom, true);
+ break;
+ case MPV_DEVCOM_MASTER_DOWN:
+ /* no need for comp set ready false since we unregister after
+ * and it hurts cleanup flow.
+ */
+ break;
+ case MPV_DEVCOM_IPSEC_MASTER_UP:
+ case MPV_DEVCOM_IPSEC_MASTER_DOWN:
+ mlx5e_ipsec_handle_mpv_event(event, my_data, event_data);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlx5e_devcom_init_mpv(struct mlx5e_priv *priv, u64 *data)
+{
+ priv->devcom = mlx5_devcom_register_component(priv->mdev->priv.devc,
+ MLX5_DEVCOM_MPV,
+ *data,
+ mlx5e_devcom_event_mpv,
+ priv);
+ if (IS_ERR_OR_NULL(priv->devcom))
+ return -EOPNOTSUPP;
+
+ if (mlx5_core_is_mp_master(priv->mdev)) {
+ mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_UP,
+ MPV_DEVCOM_MASTER_UP, priv);
+ mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_UP);
+ }
+
+ return 0;
+}
+
+static void mlx5e_devcom_cleanup_mpv(struct mlx5e_priv *priv)
+{
+ if (IS_ERR_OR_NULL(priv->devcom))
+ return;
+
+ if (mlx5_core_is_mp_master(priv->mdev)) {
+ mlx5_devcom_send_event(priv->devcom, MPV_DEVCOM_MASTER_DOWN,
+ MPV_DEVCOM_MASTER_DOWN, priv);
+ mlx5e_ipsec_send_event(priv, MPV_DEVCOM_IPSEC_MASTER_DOWN);
+ }
+
+ mlx5_devcom_unregister_component(priv->devcom);
+}
+
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
@@ -192,6 +248,13 @@ static int blocking_event(struct notifier_block *nb, unsigned long event, void *
return NOTIFY_BAD;
}
break;
+ case MLX5_DRIVER_EVENT_AFFILIATION_DONE:
+ if (mlx5e_devcom_init_mpv(priv, data))
+ return NOTIFY_BAD;
+ break;
+ case MLX5_DRIVER_EVENT_AFFILIATION_REMOVED:
+ mlx5e_devcom_cleanup_mpv(priv);
+ break;
default:
return NOTIFY_DONE;
}
@@ -834,7 +897,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct page_pool_params pp_params = { 0 };
pp_params.order = 0;
- pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV | PP_FLAG_PAGE_FRAG;
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
pp_params.pool_size = pool_size;
pp_params.nid = node;
pp_params.dev = rq->pdev;
@@ -2885,8 +2948,12 @@ static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
/* This function may be called on attach, before priv->rx_res is created. */
- if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res)
- mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+ if (priv->rx_res) {
+ mlx5e_rx_res_rss_update_num_channels(priv->rx_res, count);
+
+ if (!netif_is_rxfh_configured(priv->netdev))
+ mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count);
+ }
return 0;
}
@@ -5326,10 +5393,6 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
enum mlx5e_rx_res_features features;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res)
- return -ENOMEM;
-
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -5341,12 +5404,16 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_PTP;
if (mlx5_tunnel_inner_ft_supported(mdev))
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
- err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
- priv->max_nch, priv->drop_rq.rqn,
- &priv->channels.params.packet_merge,
- priv->channels.params.num_channels);
- if (err)
+
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, features, priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
+ priv->channels.params.num_channels);
+ if (IS_ERR(priv->rx_res)) {
+ err = PTR_ERR(priv->rx_res);
+ priv->rx_res = NULL;
+ mlx5_core_err(mdev, "create rx resources failed, %d\n", err);
goto err_close_drop_rq;
+ }
err = mlx5e_create_flow_steering(priv->fs, priv->rx_res, priv->profile,
priv->netdev);
@@ -5376,12 +5443,11 @@ err_destroy_flow_steering:
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = NULL;
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
return err;
}
@@ -5392,10 +5458,9 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = NULL;
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
}
static void mlx5e_set_mqprio_rl(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index fd1cce542b68..693e55b010d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1006,26 +1006,22 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res) {
- err = -ENOMEM;
- goto err_free_fs;
- }
-
mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_rx_res_free;
+ goto err_free_fs;
}
- err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn,
- &priv->channels.params.packet_merge,
- priv->channels.params.num_channels);
- if (err)
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
+ priv->channels.params.num_channels);
+ if (IS_ERR(priv->rx_res)) {
+ err = PTR_ERR(priv->rx_res);
+ mlx5_core_err(mdev, "Create rx resources failed, err=%d\n", err);
goto err_close_drop_rq;
+ }
err = mlx5e_create_rep_ttc_table(priv);
if (err)
@@ -1049,11 +1045,9 @@ err_destroy_ttc_table:
mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
-err_rx_res_free:
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
err_free_fs:
mlx5e_fs_cleanup(priv->fs);
priv->fs = NULL;
@@ -1067,9 +1061,8 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_destroy_rep_root_ft(priv);
mlx5_destroy_ttc_table(mlx5e_fs_get_ttc(priv->fs, false));
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
mlx5e_close_drop_rq(&priv->drop_rq);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
}
static void mlx5e_rep_mpesw_work(struct work_struct *work)
@@ -1363,8 +1356,9 @@ mlx5e_rep_vnic_reporter_diagnose(struct devlink_health_reporter *reporter,
struct mlx5e_rep_priv *rpriv = devlink_health_reporter_priv(reporter);
struct mlx5_eswitch_rep *rep = rpriv->rep;
- return mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg,
- rep->vport, true);
+ mlx5_reporter_vnic_diagnose_counters(rep->esw->dev, fmsg, rep->vport,
+ true);
+ return 0;
}
static const struct devlink_health_reporter_ops mlx5_rep_vnic_reporter_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c8590483ddc6..9a5a5c2c7da9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -753,19 +753,21 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_rss_params_indir *indir;
+ struct mlx5e_rss_params_indir indir;
int err;
- indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
- if (!indir)
- return -ENOMEM;
+ err = mlx5e_rss_params_indir_init(&indir, mdev,
+ mlx5e_rqt_size(mdev, hp->num_channels),
+ mlx5e_rqt_size(mdev, priv->max_nch));
+ if (err)
+ return err;
- mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
+ mlx5e_rss_params_indir_init_uniform(&indir, hp->num_channels);
err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
- indir);
+ &indir);
- kvfree(indir);
+ mlx5e_rss_params_indir_cleanup(&indir);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index 7a01714b3780..a7ed87e9d842 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -78,6 +78,8 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
xa_for_each(&entry->ports, idx, port) {
dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dests[i].ft = port->mcast.ft;
+ if (port->vport_num == MLX5_VPORT_UPLINK)
+ dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
i++;
}
@@ -585,10 +587,6 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
if (!rule_spec)
return ERR_PTR(-ENOMEM);
- if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
- port->vport_num == MLX5_VPORT_UPLINK)
- rule_spec->flow_context.flow_source =
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -660,11 +658,6 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
if (!rule_spec)
return ERR_PTR(-ENOMEM);
- if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
- port->vport_num == MLX5_VPORT_UPLINK)
- rule_spec->flow_context.flow_source =
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
-
if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
dest.vport.vhca_id = port->esw_owner_vhca_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 1887a24ee414..d2ebe56c3977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "eswitch.h"
+#include "lib/mlx5.h"
#include "esw/qos.h"
#include "en/port.h"
#define CREATE_TRACE_POINTS
@@ -701,10 +702,75 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
return err;
}
+static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+{
+ struct ethtool_link_ksettings lksettings;
+ struct net_device *slave, *master;
+ u32 speed = SPEED_UNKNOWN;
+
+ /* Lock ensures a stable reference to master and slave netdevice
+ * while port speed of master is queried.
+ */
+ ASSERT_RTNL();
+
+ slave = mlx5_uplink_netdev_get(mdev);
+ if (!slave)
+ goto out;
+
+ master = netdev_master_upper_dev_get(slave);
+ if (master && !__ethtool_get_link_ksettings(master, &lksettings))
+ speed = lksettings.base.speed;
+
+out:
+ return speed;
+}
+
+static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
+ bool hold_rtnl_lock, struct netlink_ext_ack *extack)
+{
+ int err;
+
+ if (!mlx5_lag_is_active(mdev))
+ goto skip_lag;
+
+ if (hold_rtnl_lock)
+ rtnl_lock();
+
+ *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
+
+ if (hold_rtnl_lock)
+ rtnl_unlock();
+
+ if (*link_speed_max != (u32)SPEED_UNKNOWN)
+ return 0;
+
+skip_lag:
+ err = mlx5_port_max_linkspeed(mdev, link_speed_max);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
+
+ return err;
+}
+
+static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
+ const char *name, u32 link_speed_max,
+ u64 value, struct netlink_ext_ack *extack)
+{
+ if (value > link_speed_max) {
+ pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
+ name, value, link_speed_max);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
{
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
struct mlx5_vport *vport;
+ u32 link_speed_max;
u32 bitmask;
int err;
@@ -712,6 +778,17 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
if (IS_ERR(vport))
return PTR_ERR(vport);
+ if (rate_mbps) {
+ err = mlx5_esw_qos_max_link_speed_get(esw->dev, &link_speed_max, false, NULL);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(esw->dev, "Police",
+ link_speed_max, rate_mbps, NULL);
+ if (err)
+ return err;
+ }
+
mutex_lock(&esw->state_lock);
if (!vport->qos.enabled) {
/* Eswitch QoS wasn't enabled yet. Enable it and vport QoS. */
@@ -744,12 +821,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
u64 value;
int err;
- err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
- return err;
- }
-
value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
@@ -758,12 +829,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return -EINVAL;
}
- if (value > link_speed_max) {
- pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
- name, value, link_speed_max);
- NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
- return -EINVAL;
- }
+ err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
+ if (err)
+ return err;
+
+ err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
+ if (err)
+ return err;
*rate = value;
return 0;
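esw_qos_devlink_rate_to_mbps() above still converts the devlink rate from bytes per second into whole Mbps, but the bound it is checked against now prefers the LAG master's negotiated speed (queried under RTNL) and only falls back to the port's maximum link speed. A standalone sketch of the conversion plus the bound check follows; the 125000 bytes/s-per-Mbps unit is an assumption standing in for MLX5_LINKSPEED_UNIT, not taken from this hunk.

#include <stdint.h>
#include <stdio.h>

/* Assumed value: bytes/s in one Mbit/s, standing in for MLX5_LINKSPEED_UNIT. */
#define LINKSPEED_UNIT	125000ULL

/* Convert a devlink rate in bytes/s to Mbps and verify it against the link
 * maximum, mirroring esw_qos_devlink_rate_to_mbps() plus the new
 * mlx5_esw_qos_link_speed_verify() split.
 */
static int rate_to_mbps(uint64_t rate_bytes_per_s, uint32_t link_speed_max, uint64_t *mbps)
{
	uint64_t value = rate_bytes_per_s / LINKSPEED_UNIT;
	uint64_t remainder = rate_bytes_per_s % LINKSPEED_UNIT;

	if (remainder)
		return -1;	/* rate must be a whole number of Mbps */
	if (value > link_speed_max)
		return -1;	/* rate may not exceed the (LAG or port) max speed */
	*mbps = value;
	return 0;
}

int main(void)
{
	uint64_t mbps;

	/* 12500000 B/s == 100 Mbps, allowed on a 25000 Mbps (25G) link */
	if (!rate_to_mbps(12500000ULL, 25000, &mbps))
		printf("%llu Mbps\n", (unsigned long long)mbps);
	return 0;
}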
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 3ec892d51f57..d91ea53eb394 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -441,8 +441,3 @@ int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int ev
return blocking_notifier_call_chain(&events->sw_nh, event, data);
}
-
-void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
-{
- queue_work(dev->priv.events->wq, work);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index a13b9c2bd144..e6bfa7e4f146 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -114,9 +114,9 @@
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
- * IPsec RoCE policy
+ * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
*/
-#define KERNEL_NIC_PRIO_NUM_LEVELS 9
+#define KERNEL_NIC_PRIO_NUM_LEVELS 11
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -137,7 +137,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1
-#define KERNEL_TX_IPSEC_NUM_LEVELS 3
+#define KERNEL_TX_IPSEC_NUM_LEVELS 4
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1
@@ -231,7 +231,7 @@ enum {
};
#define RDMA_RX_IPSEC_NUM_PRIOS 1
-#define RDMA_RX_IPSEC_NUM_LEVELS 2
+#define RDMA_RX_IPSEC_NUM_LEVELS 4
#define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS)
#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
@@ -288,7 +288,7 @@ enum {
#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
-#define RDMA_TX_IPSEC_NUM_PRIOS 1
+#define RDMA_TX_IPSEC_NUM_PRIOS 2
#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 2fb2598b775e..8ff6dc9bc803 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -365,6 +365,8 @@ static const char *hsynd_str(u8 synd)
return "FFSER error";
case MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR:
return "High temperature";
+ case MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR:
+ return "ICM fetch PCI data poisoned error";
default:
return "unrecognized error";
}
@@ -448,14 +450,15 @@ mlx5_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct mlx5_core_dev *dev = devlink_health_reporter_priv(reporter);
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- u8 synd;
- int err;
+ u8 synd = ioread8(&h->synd);
- synd = ioread8(&h->synd);
- err = devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
- if (err || !synd)
- return err;
- return devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
+ if (!synd)
+ return 0;
+
+ devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
+ devlink_fmsg_string_pair_put(fmsg, "Description", hsynd_str(synd));
+
+ return 0;
}
struct mlx5_fw_reporter_ctx {
@@ -463,94 +466,47 @@ struct mlx5_fw_reporter_ctx {
int miss_counter;
};
-static int
+static void
mlx5_fw_reporter_ctx_pairs_put(struct devlink_fmsg *fmsg,
struct mlx5_fw_reporter_ctx *fw_reporter_ctx)
{
- int err;
-
- err = devlink_fmsg_u8_pair_put(fmsg, "syndrome",
- fw_reporter_ctx->err_synd);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter",
- fw_reporter_ctx->miss_counter);
- if (err)
- return err;
- return 0;
+ devlink_fmsg_u8_pair_put(fmsg, "syndrome", fw_reporter_ctx->err_synd);
+ devlink_fmsg_u32_pair_put(fmsg, "fw_miss_counter", fw_reporter_ctx->miss_counter);
}
-static int
+static void
mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
struct devlink_fmsg *fmsg)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
u8 rfr_severity;
- int err;
int i;
if (!ioread8(&h->synd))
- return 0;
-
- err = devlink_fmsg_pair_nest_start(fmsg, "health buffer");
- if (err)
- return err;
- err = devlink_fmsg_obj_nest_start(fmsg);
- if (err)
- return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
- if (err)
- return err;
+ return;
- for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) {
- err = devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
- if (err)
- return err;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
- ioread32be(&h->assert_exit_ptr));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
- ioread32be(&h->assert_callra));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
- if (err)
- return err;
+ devlink_fmsg_pair_nest_start(fmsg, "health buffer");
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "assert_var");
+ for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
+ devlink_fmsg_u32_put(fmsg, ioread32be(h->assert_var + i));
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_u32_pair_put(fmsg, "assert_exit_ptr",
+ ioread32be(&h->assert_exit_ptr));
+ devlink_fmsg_u32_pair_put(fmsg, "assert_callra",
+ ioread32be(&h->assert_callra));
+ devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
+ devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
rfr_severity = ioread8(&h->rfr_severity);
- err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
- ioread8(&h->irisc_index));
- if (err)
- return err;
- err = devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd",
- ioread16be(&h->ext_synd));
- if (err)
- return err;
- err = devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver",
- ioread32be(&h->fw_ver));
- if (err)
- return err;
- err = devlink_fmsg_obj_nest_end(fmsg);
- if (err)
- return err;
- return devlink_fmsg_pair_nest_end(fmsg);
+ devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
+ devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
+ devlink_fmsg_u8_pair_put(fmsg, "irisc_index", ioread8(&h->irisc_index));
+ devlink_fmsg_u8_pair_put(fmsg, "synd", ioread8(&h->synd));
+ devlink_fmsg_u32_pair_put(fmsg, "ext_synd", ioread16be(&h->ext_synd));
+ devlink_fmsg_u32_pair_put(fmsg, "raw_fw_ver", ioread32be(&h->fw_ver));
+ devlink_fmsg_obj_nest_end(fmsg);
+ devlink_fmsg_pair_nest_end(fmsg);
}
static int
@@ -568,14 +524,11 @@ mlx5_fw_reporter_dump(struct devlink_health_reporter *reporter,
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
- if (err)
- return err;
+ mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
}
- err = mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
- if (err)
- return err;
+ mlx5_fw_reporter_heath_buffer_data_put(dev, fmsg);
+
return mlx5_fw_tracer_get_saved_traces_objects(dev->tracer, fmsg);
}
@@ -641,12 +594,10 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
if (priv_ctx) {
struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- err = mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
- if (err)
- goto free_data;
+ mlx5_fw_reporter_ctx_pairs_put(fmsg, fw_reporter_ctx);
}
- err = devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
+ devlink_fmsg_binary_pair_put(fmsg, "crdump_data", cr_data, crdump_size);
free_data:
kvfree(cr_data);
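As the health.c hunks above suggest, the devlink_fmsg_* put and nesting helpers now return void, with any formatting error latched inside the fmsg and surfaced when the message is emitted, so reporter callbacks no longer check each call. A minimal sketch of a diagnose callback written against that convention; the example_* names are hypothetical and not part of this patch:

#include <net/devlink.h>

/* Hypothetical helpers standing in for device-specific reads. */
static u8 example_read_syndrome(struct devlink_health_reporter *reporter);
static const char *example_synd_str(u8 synd);

static int example_fw_diagnose(struct devlink_health_reporter *reporter,
			       struct devlink_fmsg *fmsg,
			       struct netlink_ext_ack *extack)
{
	u8 synd = example_read_syndrome(reporter);

	if (!synd)
		return 0;

	/* No per-call error checks: the fmsg accumulates any error internally. */
	devlink_fmsg_u8_pair_put(fmsg, "Syndrome", synd);
	devlink_fmsg_string_pair_put(fmsg, "Description", example_synd_str(synd));
	return 0;
}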
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index baa7ef812313..2bf77a5251b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -418,12 +418,6 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
return -ENOMEM;
}
- priv->rx_res = mlx5e_rx_res_alloc();
- if (!priv->rx_res) {
- err = -ENOMEM;
- goto err_free_fs;
- }
-
mlx5e_create_q_counters(priv);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
@@ -432,12 +426,13 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn,
- &priv->channels.params.packet_merge,
- priv->channels.params.num_channels);
- if (err)
+ priv->rx_res = mlx5e_rx_res_create(priv->mdev, 0, priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
+ priv->channels.params.num_channels);
+ if (IS_ERR(priv->rx_res)) {
+ err = PTR_ERR(priv->rx_res);
goto err_close_drop_rq;
+ }
err = mlx5i_create_flow_steering(priv);
if (err)
@@ -447,13 +442,11 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
-err_free_fs:
mlx5e_fs_cleanup(priv->fs);
return err;
}
@@ -462,10 +455,9 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_rx_res_destroy(priv->rx_res);
+ priv->rx_res = ERR_PTR(-EINVAL);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_destroy_q_counters(priv);
- mlx5e_rx_res_free(priv->rx_res);
- priv->rx_res = NULL;
mlx5e_fs_cleanup(priv->fs);
}
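The ipoib hunks above replace the separate mlx5e_rx_res_alloc()/mlx5e_rx_res_init() pair with a single mlx5e_rx_res_create() that returns either a valid object or an ERR_PTR(). A minimal sketch of that constructor shape, with illustrative example_* names rather than the mlx5e implementation:

#include <linux/err.h>
#include <linux/slab.h>

struct example_res {
	int nch;
};

/* Hypothetical second-stage init folded into the constructor. */
static int example_res_init(struct example_res *res, int nch)
{
	res->nch = nch;
	return 0;
}

static struct example_res *example_res_create(int nch)
{
	struct example_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	int err;

	if (!res)
		return ERR_PTR(-ENOMEM);

	err = example_res_init(res, nch);
	if (err) {
		kfree(res);
		return ERR_PTR(err);
	}
	return res;
}

/* Caller pattern: res = example_res_create(n); if (IS_ERR(res)) return PTR_ERR(res); */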
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index af3fac090b82..d14459e5c04f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -943,6 +943,26 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
}
}
+/* The last mdev to unregister will destroy the workqueue before removing the
+ * devcom component, and since all the mdevs use the same devcom component, we
+ * are guaranteed that the devcom is valid while the calling work is running.
+ */
+struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev)
+{
+ struct mlx5_devcom_comp_dev *devcom = NULL;
+ int i;
+
+ mutex_lock(&ldev->lock);
+ for (i = 0; i < ldev->ports; i++) {
+ if (ldev->pf[i].dev) {
+ devcom = ldev->pf[i].dev->priv.hca_devcom_comp;
+ break;
+ }
+ }
+ mutex_unlock(&ldev->lock);
+ return devcom;
+}
+
static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
{
queue_delayed_work(ldev->wq, &ldev->bond_work, delay);
@@ -953,9 +973,14 @@ static void mlx5_do_bond_work(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
bond_work);
+ struct mlx5_devcom_comp_dev *devcom;
int status;
- status = mlx5_dev_list_trylock();
+ devcom = mlx5_lag_get_devcom_comp(ldev);
+ if (!devcom)
+ return;
+
+ status = mlx5_devcom_comp_trylock(devcom);
if (!status) {
mlx5_queue_bond_work(ldev, HZ);
return;
@@ -964,14 +989,14 @@ static void mlx5_do_bond_work(struct work_struct *work)
mutex_lock(&ldev->lock);
if (ldev->mode_changes_in_progress) {
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(devcom);
mlx5_queue_bond_work(ldev, HZ);
return;
}
mlx5_do_bond(ldev);
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(devcom);
}
static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
@@ -1212,13 +1237,14 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
dev->priv.lag = NULL;
}
-/* Must be called with intf_mutex held */
+/* Must be called with HCA devcom component lock held */
static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
+ struct mlx5_devcom_comp_dev *pos = NULL;
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- tmp_dev = mlx5_get_next_phys_dev_lag(dev);
+ tmp_dev = mlx5_devcom_get_next_peer_data(dev->priv.hca_devcom_comp, &pos);
if (tmp_dev)
ldev = mlx5_lag_dev(tmp_dev);
@@ -1275,10 +1301,13 @@ void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
if (!mlx5_lag_is_supported(dev))
return;
+ if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ return;
+
recheck:
- mlx5_dev_list_lock();
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
err = __mlx5_lag_dev_add_mdev(dev);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
if (err) {
msleep(100);
@@ -1431,7 +1460,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
if (!ldev)
return;
- mlx5_dev_list_lock();
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
mutex_lock(&ldev->lock);
ldev->mode_changes_in_progress++;
@@ -1439,7 +1468,7 @@ void mlx5_lag_disable_change(struct mlx5_core_dev *dev)
mlx5_disable_lag(ldev);
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
}
void mlx5_lag_enable_change(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index 481e92f39fe6..50fcb1eee574 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -112,6 +112,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev);
void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
int mlx5_deactivate_lag(struct mlx5_lag *ldev);
void mlx5_lag_add_devices(struct mlx5_lag *ldev);
+struct mlx5_devcom_comp_dev *mlx5_lag_get_devcom_comp(struct mlx5_lag *ldev);
static inline bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 4bf15391525c..82889f30506e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -65,12 +65,12 @@ err_metadata:
return err;
}
-#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
+#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 4
static int enable_mpesw(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
int err;
+ int i;
if (ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
@@ -98,11 +98,11 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!err)
- err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
- if (err)
- goto err_rescan_drivers;
+ for (i = 0; i < ldev->ports; i++) {
+ err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ if (err)
+ goto err_rescan_drivers;
+ }
return 0;
@@ -112,8 +112,8 @@ err_rescan_drivers:
mlx5_deactivate_lag(ldev);
err_add_devices:
mlx5_lag_add_devices(ldev);
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++)
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
@@ -129,9 +129,14 @@ static void disable_mpesw(struct mlx5_lag *ldev)
static void mlx5_mpesw_work(struct work_struct *work)
{
struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
+ struct mlx5_devcom_comp_dev *devcom;
struct mlx5_lag *ldev = mpesww->lag;
- mlx5_dev_list_lock();
+ devcom = mlx5_lag_get_devcom_comp(ldev);
+ if (!devcom)
+ return;
+
+ mlx5_devcom_comp_lock(devcom);
mutex_lock(&ldev->lock);
if (ldev->mode_changes_in_progress) {
mpesww->result = -EAGAIN;
@@ -144,7 +149,7 @@ static void mlx5_mpesw_work(struct work_struct *work)
disable_mpesw(ldev);
unlock:
mutex_unlock(&ldev->lock);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(devcom);
complete(&mpesww->comp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 7d9bbb494d95..101b3bb90863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -507,10 +507,7 @@ static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
- if (IS_ERR(port_sel->outer.ttc))
- return PTR_ERR(port_sel->outer.ttc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(port_sel->outer.ttc);
}
static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
@@ -521,10 +518,7 @@ static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
port_sel->inner.ttc = mlx5_create_inner_ttc_table(dev, &ttc_params);
- if (IS_ERR(port_sel->inner.ttc))
- return PTR_ERR(port_sel->inner.ttc);
-
- return 0;
+ return PTR_ERR_OR_ZERO(port_sel->inner.ttc);
}
int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
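The port_sel.c hunks above collapse the "if (IS_ERR(p)) return PTR_ERR(p); return 0;" tail into PTR_ERR_OR_ZERO(), which evaluates to PTR_ERR(ptr) when IS_ERR(ptr) and to 0 otherwise. A minimal sketch of the idiom with hypothetical example_* names:

#include <linux/err.h>

struct example_table {
	int dummy;
};

/* Stand-in constructor: real code would build the table or return ERR_PTR(). */
static struct example_table *example_table_create(void)
{
	return ERR_PTR(-EOPNOTSUPP);
}

struct example_ctx {
	struct example_table *ttc;
};

static int example_create_ttc(struct example_ctx *ctx)
{
	ctx->ttc = example_table_create();

	/* Same as: if (IS_ERR(ctx->ttc)) return PTR_ERR(ctx->ttc); return 0; */
	return PTR_ERR_OR_ZERO(ctx->ttc);
}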
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index 00e67910e3ee..e8e50563e956 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -31,6 +31,7 @@ struct mlx5_devcom_comp {
struct kref ref;
bool ready;
struct rw_semaphore sem;
+ struct lock_class_key lock_key;
};
struct mlx5_devcom_comp_dev {
@@ -119,6 +120,8 @@ mlx5_devcom_comp_alloc(u64 id, u64 key, mlx5_devcom_event_handler_t handler)
comp->key = key;
comp->handler = handler;
init_rwsem(&comp->sem);
+ lockdep_register_key(&comp->lock_key);
+ lockdep_set_class(&comp->sem, &comp->lock_key);
kref_init(&comp->ref);
INIT_LIST_HEAD(&comp->comp_dev_list_head);
@@ -133,6 +136,7 @@ mlx5_devcom_comp_release(struct kref *ref)
mutex_lock(&comp_list_lock);
list_del(&comp->comp_list);
mutex_unlock(&comp_list_lock);
+ lockdep_unregister_key(&comp->lock_key);
kfree(comp);
}
@@ -383,3 +387,24 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
*pos = tmp;
return data;
}
+
+void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+ down_write(&devcom->comp->sem);
+}
+
+void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return;
+ up_write(&devcom->comp->sem);
+}
+
+int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom)
+{
+ if (IS_ERR_OR_NULL(devcom))
+ return 0;
+ return down_write_trylock(&devcom->comp->sem);
+}
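The devcom.c hunk above registers a dedicated lock_class_key per component and assigns it to the component's rw_semaphore, so lockdep tracks each dynamically allocated component's lock as its own class instead of lumping them together. A minimal sketch of that pattern; the example_* names are illustrative:

#include <linux/lockdep.h>
#include <linux/rwsem.h>
#include <linux/slab.h>

struct example_comp {
	struct rw_semaphore sem;
	struct lock_class_key lock_key;
};

static struct example_comp *example_comp_alloc(void)
{
	struct example_comp *comp = kzalloc(sizeof(*comp), GFP_KERNEL);

	if (!comp)
		return NULL;

	init_rwsem(&comp->sem);
	/* Give this instance its own lockdep class. */
	lockdep_register_key(&comp->lock_key);
	lockdep_set_class(&comp->sem, &comp->lock_key);
	return comp;
}

static void example_comp_free(struct example_comp *comp)
{
	/* Unregister the key before freeing the memory that holds it. */
	lockdep_unregister_key(&comp->lock_key);
	kfree(comp);
}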
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 8389ac0af708..fc23bbef87b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -8,6 +8,8 @@
enum mlx5_devcom_component {
MLX5_DEVCOM_ESW_OFFLOADS,
+ MLX5_DEVCOM_MPV,
+ MLX5_DEVCOM_HCA_PORTS,
MLX5_DEVCOM_NUM_COMPONENTS,
};
@@ -51,4 +53,8 @@ void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom_comp_dev *devcom,
data; \
data = mlx5_devcom_get_next_peer_data_rcu(devcom, &pos))
+void mlx5_devcom_comp_lock(struct mlx5_devcom_comp_dev *devcom);
+void mlx5_devcom_comp_unlock(struct mlx5_devcom_comp_dev *devcom);
+int mlx5_devcom_comp_trylock(struct mlx5_devcom_comp_dev *devcom);
+
#endif /* __LIB_MLX5_DEVCOM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 69a75459775d..4b7f7131c560 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -85,7 +85,6 @@ void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
-struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index 6e3f178d6f84..234cd00f71a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -2,8 +2,11 @@
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#include "fs_core.h"
+#include "fs_cmd.h"
+#include "en.h"
#include "lib/ipsec_fs_roce.h"
#include "mlx5_core.h"
+#include <linux/random.h>
struct mlx5_ipsec_miss {
struct mlx5_flow_group *group;
@@ -15,6 +18,12 @@ struct mlx5_ipsec_rx_roce {
struct mlx5_flow_table *ft;
struct mlx5_flow_handle *rule;
struct mlx5_ipsec_miss roce_miss;
+ struct mlx5_flow_table *nic_master_ft;
+ struct mlx5_flow_group *nic_master_group;
+ struct mlx5_flow_handle *nic_master_rule;
+ struct mlx5_flow_table *goto_alias_ft;
+ u32 alias_id;
+ char key[ACCESS_KEY_LEN];
struct mlx5_flow_table *ft_rdma;
struct mlx5_flow_namespace *ns_rdma;
@@ -24,6 +33,9 @@ struct mlx5_ipsec_tx_roce {
struct mlx5_flow_group *g;
struct mlx5_flow_table *ft;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *goto_alias_ft;
+ u32 alias_id;
+ char key[ACCESS_KEY_LEN];
struct mlx5_flow_namespace *ns;
};
@@ -31,6 +43,7 @@ struct mlx5_ipsec_fs {
struct mlx5_ipsec_rx_roce ipv4_rx;
struct mlx5_ipsec_rx_roce ipv6_rx;
struct mlx5_ipsec_tx_roce tx;
+ struct mlx5_devcom_comp_dev **devcom;
};
static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
@@ -43,11 +56,83 @@ static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}
+static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev)
+{
+ u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access);
+ u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported);
+
+ if (!(obj_supp &
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS))
+ return false;
+
+ if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
+ return false;
+
+ return true;
+}
+
+static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev,
+ struct mlx5_core_dev *master_mdev)
+{
+ if (ipsec_fs_create_alias_supported_one(mdev) &&
+ ipsec_fs_create_alias_supported_one(master_mdev))
+ return true;
+
+ return false;
+}
+
+static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner,
+ struct mlx5_core_dev *ibv_allowed,
+ struct mlx5_flow_table *ft,
+ u32 *obj_id, char *alias_key, bool from_event)
+{
+ u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
+ u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id);
+ struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
+ struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
+ int ret;
+ int i;
+
+ if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed))
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < ACCESS_KEY_LEN; i++)
+ if (!from_event)
+ alias_key[i] = get_random_u64() & 0xFF;
+
+ memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ allow_attr.obj_id = aliased_object_id;
+
+ if (!from_event) {
+ ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr);
+ if (ret) {
+ mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
+ alias_attr.obj_id = aliased_object_id;
+ alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
+ alias_attr.vhca_id = vhca_id_to_be_accessed;
+ ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id);
+ if (ret) {
+ mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int
ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
struct mlx5_flow_destination *default_dst,
struct mlx5_ipsec_rx_roce *roce)
{
+ bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
struct mlx5_flow_destination dst = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
@@ -61,14 +146,19 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
- dst.ft = roce->ft_rdma;
+ if (is_mpv_slave) {
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dst.ft = roce->goto_alias_ft;
+ } else {
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+ }
rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
err);
- goto fail_add_rule;
+ goto out;
}
roce->rule = rule;
@@ -84,12 +174,30 @@ ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
roce->roce_miss.rule = rule;
+ if (!is_mpv_slave)
+ goto out;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+ rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
+ 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n",
+ err);
+ goto fail_add_nic_master_rule;
+ }
+ roce->nic_master_rule = rule;
+
kvfree(spec);
return 0;
+fail_add_nic_master_rule:
+ mlx5_del_flow_rules(roce->roce_miss.rule);
fail_add_default_rule:
mlx5_del_flow_rules(roce->rule);
-fail_add_rule:
+out:
kvfree(spec);
return err;
}
@@ -120,25 +228,373 @@ out:
return err;
}
-void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
+static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft)
{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port,
+ MLX5_CAP_GEN(mdev, native_port_num));
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->goto_alias_ft;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
+ err);
+ goto out;
+ }
+ roce->rule = rule;
+
+ /* No need for a miss rule: on miss we go to the next PRIO, where the
+ * master, if configured, will catch the traffic and steer it to its
+ * encryption table.
+ */
+
+out:
+ kvfree(spec);
+ return err;
+}
+
+#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+#define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0
+#define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* Since last used level in NIC ipsec is 2 */
+
+static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft,
+ struct mlx5e_priv *peer_priv,
+ bool from_event)
+{
+ struct mlx5_flow_namespace *roce_ns, *nic_ns;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table next_ft;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+ if (!roce_ns)
+ return -EOPNOTSUPP;
+
+ nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!nic_ns)
+ return -EOPNOTSUPP;
+
+ err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key,
+ from_event);
+ if (err)
+ return err;
+
+ next_ft.id = roce->alias_id;
+ ft_attr.max_fte = 1;
+ ft_attr.next_ft = &next_ft;
+ ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL;
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err);
+ goto destroy_alias;
+ }
+
+ roce->goto_alias_ft = ft;
+
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ ft_attr.max_fte = 1;
+ ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
+ ft = mlx5_create_flow_table(roce_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
+ goto destroy_alias_ft;
+ }
+
+ roce->ft = ft;
+
+ return 0;
+
+destroy_alias_ft:
+ mlx5_destroy_flow_table(roce->goto_alias_ft);
+destroy_alias:
+ mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+ return err;
+}
+
+static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft,
+ u32 *in)
+{
+ struct mlx5_flow_group *g;
+ int ix = 0;
+ int err;
+ u8 *mc;
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS);
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
+ return err;
+ }
+ roce->g = g;
+
+ err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
+ goto destroy_group;
+ }
+
+ return 0;
+
+destroy_group:
+ mlx5_destroy_flow_group(roce->g);
+ return err;
+}
+
+static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_table *pol_ft,
+ u32 *in, bool from_event)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5_ipsec_tx_roce *roce;
+ struct mlx5e_priv *peer_priv;
+ int err;
+
+ if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
+ return -EOPNOTSUPP;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
+ if (!peer_priv) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ roce = &ipsec_roce->tx;
+
+ err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv, from_event);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err);
+ goto release_peer;
+ }
+
+ err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err);
+ goto destroy_tables;
+ }
+
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return 0;
+
+destroy_tables:
+ mlx5_destroy_flow_table(roce->ft);
+ mlx5_destroy_flow_table(roce->goto_alias_ft);
+ mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+release_peer:
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return err;
+}
+
+static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce)
+{
+ mlx5_destroy_flow_table(roce->goto_alias_ft);
+ mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+ mlx5_destroy_flow_group(roce->nic_master_group);
+ mlx5_destroy_flow_table(roce->nic_master_ft);
+}
+
+#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
+#define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3
+#define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2
+
+static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_namespace *ns,
+ u32 family, u32 level, u32 prio)
+{
+ struct mlx5_flow_namespace *roce_ns, *nic_ns;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_devcom_comp_dev *tmp = NULL;
+ struct mlx5_ipsec_rx_roce *roce;
+ struct mlx5_flow_table next_ft;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ struct mlx5e_priv *peer_priv;
+ int ix = 0;
+ u32 *in;
+ int err;
+
+ roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
+ return -EOPNOTSUPP;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
+ if (!peer_priv) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
+ if (!roce_ns) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+ if (!nic_ns) {
+ err = -EOPNOTSUPP;
+ goto release_peer;
+ }
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto release_peer;
+ }
+
+ ft_attr.level = (family == AF_INET) ? MLX5_IPSEC_RX_IPV4_FT_LEVEL :
+ MLX5_IPSEC_RX_IPV6_FT_LEVEL;
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err);
+ goto free_in;
+ }
+
+ roce->ft_rdma = ft;
+
+ ft_attr.max_fte = 1;
+ ft_attr.prio = prio;
+ ft_attr.level = level + 2;
+ ft = mlx5_create_flow_table(nic_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err);
+ goto destroy_ft_rdma;
+ }
+ roce->nic_master_ft = ft;
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += 1;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(roce->nic_master_ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err);
+ goto destroy_nic_master_ft;
+ }
+ roce->nic_master_group = g;
+
+ err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft,
+ &roce->alias_id, roce->key, false);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err);
+ goto destroy_group;
+ }
+
+ next_ft.id = roce->alias_id;
+ ft_attr.max_fte = 1;
+ ft_attr.prio = prio;
+ ft_attr.level = roce->ft->level + 1;
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.next_ft = &next_ft;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err);
+ goto destroy_alias;
+ }
+ roce->goto_alias_ft = ft;
+
+ kvfree(in);
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return 0;
+
+destroy_alias:
+ mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+destroy_group:
+ mlx5_destroy_flow_group(roce->nic_master_group);
+destroy_nic_master_ft:
+ mlx5_destroy_flow_table(roce->nic_master_ft);
+destroy_ft_rdma:
+ mlx5_destroy_flow_table(roce->ft_rdma);
+free_in:
+ kvfree(in);
+release_peer:
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5_devcom_comp_dev *tmp = NULL;
struct mlx5_ipsec_tx_roce *tx_roce;
+ struct mlx5e_priv *peer_priv;
if (!ipsec_roce)
return;
tx_roce = &ipsec_roce->tx;
+ if (!tx_roce->ft)
+ return; /* In case RoCE was already cleaned up by the MPV event flow */
+
mlx5_del_flow_rules(tx_roce->rule);
mlx5_destroy_flow_group(tx_roce->g);
mlx5_destroy_flow_table(tx_roce->ft);
-}
-#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+ if (!mlx5_core_is_mp_slave(mdev))
+ return;
+
+ if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
+ return;
+
+ peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
+ if (!peer_priv) {
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ return;
+ }
+
+ mlx5_destroy_flow_table(tx_roce->goto_alias_ft);
+ mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
+ mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
+ tx_roce->ft = NULL;
+}
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
- struct mlx5_flow_table *pol_ft)
+ struct mlx5_flow_table *pol_ft,
+ bool from_event)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_ipsec_tx_roce *roce;
@@ -157,7 +613,14 @@ int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
if (!in)
return -ENOMEM;
+ if (mlx5_core_is_mp_slave(mdev)) {
+ err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event);
+ goto free_in;
+ }
+
ft_attr.max_fte = 1;
+ ft_attr.prio = 1;
+ ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
ft = mlx5_create_flow_table(roce->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
@@ -209,8 +672,10 @@ struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_ro
return rx_roce->ft;
}
-void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
+void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family,
+ struct mlx5_core_dev *mdev)
{
+ bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
struct mlx5_ipsec_rx_roce *rx_roce;
if (!ipsec_roce)
@@ -218,23 +683,29 @@ void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
&ipsec_roce->ipv6_rx;
+ if (!rx_roce->ft)
+ return; /* In case RoCE was already cleaned up by the MPV event flow */
+ if (is_mpv_slave)
+ mlx5_del_flow_rules(rx_roce->nic_master_rule);
mlx5_del_flow_rules(rx_roce->roce_miss.rule);
mlx5_del_flow_rules(rx_roce->rule);
+ if (is_mpv_slave)
+ roce_rx_mpv_destroy_tables(mdev, rx_roce);
mlx5_destroy_flow_table(rx_roce->ft_rdma);
mlx5_destroy_flow_group(rx_roce->roce_miss.group);
mlx5_destroy_flow_group(rx_roce->g);
mlx5_destroy_flow_table(rx_roce->ft);
+ rx_roce->ft = NULL;
}
-#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
-
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_flow_namespace *ns,
struct mlx5_flow_destination *default_dst,
u32 family, u32 level, u32 prio)
{
+ bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_ipsec_rx_roce *roce;
struct mlx5_flow_table *ft;
@@ -298,18 +769,28 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
}
roce->roce_miss.group = g;
- memset(&ft_attr, 0, sizeof(ft_attr));
- if (family == AF_INET)
- ft_attr.level = 1;
- ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
- goto fail_rdma_table;
+ if (is_mpv_slave) {
+ err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err);
+ goto fail_mpv_create;
+ }
+ } else {
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ if (family == AF_INET)
+ ft_attr.level = 1;
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev,
+ "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
+ goto fail_rdma_table;
+ }
+
+ roce->ft_rdma = ft;
}
- roce->ft_rdma = ft;
-
err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
if (err) {
mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
@@ -320,7 +801,10 @@ int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
return 0;
fail_setup_rule:
+ if (is_mpv_slave)
+ roce_rx_mpv_destroy_tables(mdev, roce);
mlx5_destroy_flow_table(roce->ft_rdma);
+fail_mpv_create:
fail_rdma_table:
mlx5_destroy_flow_group(roce->roce_miss.group);
fail_mgroup:
@@ -332,12 +816,24 @@ fail_nomem:
return err;
}
+bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev)
+{
+ if (!mlx5_core_mp_enabled(mdev))
+ return true;
+
+ if (ipsec_fs_create_alias_supported_one(mdev))
+ return true;
+
+ return false;
+}
+
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
{
kfree(ipsec_roce);
}
-struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
+ struct mlx5_devcom_comp_dev **devcom)
{
struct mlx5_ipsec_fs *roce_ipsec;
struct mlx5_flow_namespace *ns;
@@ -363,6 +859,8 @@ struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
roce_ipsec->tx.ns = ns;
+ roce_ipsec->devcom = devcom;
+
return roce_ipsec;
err_tx:
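The RoCE IPsec hunks above all unwind with the usual goto-label convention: resources are destroyed in reverse creation order, and each error label undoes only what was created before the failing step. A minimal sketch of that shape with hypothetical constructors:

static int create_a(void) { return 0; }	/* hypothetical resource constructors */
static int create_b(void) { return 0; }
static int create_c(void) { return 0; }
static void destroy_a(void) { }
static void destroy_b(void) { }

static int example_setup(void)
{
	int err;

	err = create_a();
	if (err)
		return err;

	err = create_b();
	if (err)
		goto err_destroy_a;

	err = create_c();
	if (err)
		goto err_destroy_b;

	return 0;

err_destroy_b:
	destroy_b();
err_destroy_a:
	destroy_a();
	return err;
}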
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
index 9712d705fe48..2a1af78309fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
@@ -4,22 +4,28 @@
#ifndef __MLX5_LIB_IPSEC_H__
#define __MLX5_LIB_IPSEC_H__
+#include "lib/devcom.h"
+
struct mlx5_ipsec_fs;
struct mlx5_flow_table *
mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family);
void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
- u32 family);
+ u32 family, struct mlx5_core_dev *mdev);
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
struct mlx5_flow_namespace *ns,
struct mlx5_flow_destination *default_dst,
u32 family, u32 level, u32 prio);
-void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce);
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_core_dev *mdev);
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
struct mlx5_ipsec_fs *ipsec_roce,
- struct mlx5_flow_table *pol_ft);
+ struct mlx5_flow_table *pol_ft,
+ bool from_event);
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
-struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev);
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
+ struct mlx5_devcom_comp_dev **devcom);
+bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev);
#endif /* __MLX5_LIB_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 15561965d2af..a17152c1cbb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -73,6 +73,7 @@
#include "sf/sf.h"
#include "mlx5_irq.h"
#include "hwmon.h"
+#include "lag/lag.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -361,6 +362,12 @@ void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
+void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data)
+{
+ mlx5_blocking_notifier_call_chain(dev, event, data);
+}
+EXPORT_SYMBOL(mlx5_core_mp_event_replay);
+
int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
{
@@ -946,6 +953,27 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev)
mlx5_pci_disable_device(dev);
}
+static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ /* This component is used to synchronize adding a core_dev to a lag_dev and
+ * to synchronize changes of mlx5_adev_devices between the LAG layer and
+ * other layers.
+ */
+ if (!mlx5_lag_is_supported(dev))
+ return;
+
+ dev->priv.hca_devcom_comp =
+ mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
+ mlx5_query_nic_system_image_guid(dev),
+ NULL, dev);
+ if (IS_ERR_OR_NULL(dev->priv.hca_devcom_comp))
+ mlx5_core_err(dev, "Failed to register devcom HCA component\n");
+}
+
+static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
+{
+ mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
+}
+
static int mlx5_init_once(struct mlx5_core_dev *dev)
{
int err;
@@ -954,6 +982,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
if (IS_ERR(dev->priv.devc))
mlx5_core_warn(dev, "failed to register devcom device %ld\n",
PTR_ERR(dev->priv.devc));
+ mlx5_register_hca_devcom_comp(dev);
err = mlx5_query_board_id(dev);
if (err) {
@@ -1088,6 +1117,7 @@ err_eq_cleanup:
err_irq_cleanup:
mlx5_irq_table_cleanup(dev);
err_devcom:
+ mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
return err;
@@ -1117,6 +1147,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
+ mlx5_unregister_hca_devcom_comp(dev);
mlx5_devcom_unregister_device(dev->priv.devc);
}
@@ -1405,9 +1436,9 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_devlink_traps_unregister(priv_to_devlink(dev));
mlx5_sf_dev_table_destroy(dev);
- mlx5_eswitch_disable(dev->priv.eswitch);
mlx5_sriov_detach(dev);
mlx5_lag_remove_mdev(dev);
mlx5_ec_cleanup(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 124352459c23..6b14e347d914 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -41,6 +41,7 @@
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
+#include "lib/devcom.h"
extern uint mlx5_core_debug_mask;
@@ -97,6 +98,22 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+#define ACCESS_KEY_LEN 32
+#define FT_ID_FT_TYPE_OFFSET 24
+
+struct mlx5_cmd_allow_other_vhca_access_attr {
+ u16 obj_type;
+ u32 obj_id;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
+struct mlx5_cmd_alias_obj_create_attr {
+ u32 obj_id;
+ u16 vhca_id;
+ u16 obj_type;
+ u8 access_key[ACCESS_KEY_LEN];
+};
+
static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
{
struct device *device = dev->device;
@@ -143,6 +160,8 @@ enum mlx5_semaphore_space_address {
#define MLX5_DEFAULT_PROF 2
#define MLX5_SF_PROF 3
+#define MLX5_NUM_FW_CMD_THREADS 8
+#define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS
static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed,
size_t item_size, size_t num_items,
@@ -248,10 +267,6 @@ int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
-void mlx5_dev_list_lock(void);
-void mlx5_dev_list_unlock(void);
-int mlx5_dev_list_trylock(void);
void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
@@ -290,14 +305,12 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
{
int ret;
- mlx5_dev_list_lock();
+ mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp);
ret = mlx5_rescan_drivers_locked(dev);
- mlx5_dev_list_unlock();
+ mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp);
return ret;
}
-void mlx5_lag_update(struct mlx5_core_dev *dev);
-
enum {
MLX5_NIC_IFC_FULL = 0,
MLX5_NIC_IFC_DISABLED = 1,
@@ -331,7 +344,6 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
-void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -343,6 +355,12 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev);
bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
+int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_allow_other_vhca_access_attr *attr);
+int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_alias_obj_create_attr *alias_attr,
+ u32 *obj_id);
+int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type);
static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 05e148db9889..c93492b67788 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -14,17 +14,22 @@
struct mlx5_sf_dev_table {
struct xarray devices;
- unsigned int max_sfs;
phys_addr_t base_address;
u64 sf_bar_length;
struct notifier_block nb;
- struct mutex table_lock; /* Serializes sf life cycle and vhca state change handler */
struct workqueue_struct *active_wq;
struct work_struct work;
u8 stop_active_wq:1;
struct mlx5_core_dev *dev;
};
+struct mlx5_sf_dev_active_work_ctx {
+ struct work_struct work;
+ struct mlx5_vhca_state_event event;
+ struct mlx5_sf_dev_table *table;
+ int sf_index;
+};
+
static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
{
return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
@@ -110,12 +115,6 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
sf_dev->parent_mdev = dev;
sf_dev->fn_id = fn_id;
- if (!table->max_sfs) {
- mlx5_adev_idx_free(id);
- kfree(sf_dev);
- err = -EOPNOTSUPP;
- goto add_err;
- }
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
trace_mlx5_sf_dev_add(dev, sf_dev, id);
@@ -172,7 +171,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
return 0;
sf_index = event->function_id - base_id;
- mutex_lock(&table->table_lock);
sf_dev = xa_load(&table->devices, sf_index);
switch (event->new_vhca_state) {
case MLX5_VHCA_STATE_INVALID:
@@ -196,7 +194,6 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
default:
break;
}
- mutex_unlock(&table->table_lock);
return 0;
}
@@ -221,15 +218,44 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
return 0;
}
-static void mlx5_sf_dev_add_active_work(struct work_struct *work)
+static void mlx5_sf_dev_add_active_work(struct work_struct *_work)
{
- struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work);
+ struct mlx5_sf_dev_active_work_ctx *work_ctx;
+
+ work_ctx = container_of(_work, struct mlx5_sf_dev_active_work_ctx, work);
+ if (work_ctx->table->stop_active_wq)
+ goto out;
+ /* Don't probe a device which has already been probed */
+ if (!xa_load(&work_ctx->table->devices, work_ctx->sf_index))
+ mlx5_sf_dev_add(work_ctx->table->dev, work_ctx->sf_index,
+ work_ctx->event.function_id, work_ctx->event.sw_function_id);
+ /* There is a race where the SF becomes inactive after the query
+ * above: e.g. the query reports the SF as active, and afterwards
+ * the eswitch manager sets it to inactive.
+ * This case cannot be handled in SW, since the SF is probed on one
+ * system while the deactivation happens on a different system.
+ * If the deactivation happens after the SF performs init_hca(),
+ * the SF is fully probed and then removed. If it happens before
+ * init_hca(), the SF probe fails.
+ */
+out:
+ kfree(work_ctx);
+}
+
+/* In case SFs are generated externally, probe active SFs */
+static void mlx5_sf_dev_queue_active_works(struct work_struct *_work)
+{
+ struct mlx5_sf_dev_table *table = container_of(_work, struct mlx5_sf_dev_table, work);
u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ struct mlx5_sf_dev_active_work_ctx *work_ctx;
struct mlx5_core_dev *dev = table->dev;
u16 max_functions;
u16 function_id;
u16 sw_func_id;
int err = 0;
+ int wq_idx;
u8 state;
int i;
@@ -249,27 +275,22 @@ static void mlx5_sf_dev_add_active_work(struct work_struct *work)
continue;
sw_func_id = MLX5_GET(query_vhca_state_out, out, vhca_state_context.sw_function_id);
- mutex_lock(&table->table_lock);
- /* Don't probe device which is already probe */
- if (!xa_load(&table->devices, i))
- mlx5_sf_dev_add(dev, i, function_id, sw_func_id);
- /* There is a race where SF got inactive after the query
- * above. e.g.: the query returns that the state of the
- * SF is active, and after that the eswitch manager set it to
- * inactive.
- * This case cannot be managed in SW, since the probing of the
- * SF is on one system, and the inactivation is on a different
- * system.
- * If the inactive is done after the SF perform init_hca(),
- * the SF will fully probe and then removed. If it was
- * done before init_hca(), the SF probe will fail.
- */
- mutex_unlock(&table->table_lock);
+ work_ctx = kzalloc(sizeof(*work_ctx), GFP_KERNEL);
+ if (!work_ctx)
+ return;
+
+ INIT_WORK(&work_ctx->work, &mlx5_sf_dev_add_active_work);
+ work_ctx->event.function_id = function_id;
+ work_ctx->event.sw_function_id = sw_func_id;
+ work_ctx->table = table;
+ work_ctx->sf_index = i;
+ wq_idx = work_ctx->event.function_id % MLX5_DEV_MAX_WQS;
+ mlx5_vhca_events_work_enqueue(dev, wq_idx, &work_ctx->work);
}
}
/* In case SFs are generated externally, probe active SFs */
-static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
+static int mlx5_sf_dev_create_active_works(struct mlx5_sf_dev_table *table)
{
if (MLX5_CAP_GEN(table->dev, eswitch_manager))
return 0; /* the table is local */
@@ -280,12 +301,12 @@ static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table)
table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
if (!table->active_wq)
return -ENOMEM;
- INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
+ INIT_WORK(&table->work, &mlx5_sf_dev_queue_active_works);
queue_work(table->active_wq, &table->work);
return 0;
}
-static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
+static void mlx5_sf_dev_destroy_active_works(struct mlx5_sf_dev_table *table)
{
if (table->active_wq) {
table->stop_active_wq = true;
@@ -296,7 +317,6 @@ static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table)
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
{
struct mlx5_sf_dev_table *table;
- unsigned int max_sfs;
int err;
if (!mlx5_sf_dev_supported(dev))
@@ -310,37 +330,30 @@ void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
table->dev = dev;
- if (MLX5_CAP_GEN(dev, max_num_sf))
- max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
- else
- max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
table->base_address = pci_resource_start(dev->pdev, 2);
- table->max_sfs = max_sfs;
xa_init(&table->devices);
- mutex_init(&table->table_lock);
dev->priv.sf_dev_table = table;
err = mlx5_vhca_event_notifier_register(dev, &table->nb);
if (err)
goto vhca_err;
- err = mlx5_sf_dev_queue_active_work(table);
+ err = mlx5_sf_dev_create_active_works(table);
if (err)
goto add_active_err;
err = mlx5_sf_dev_vhca_arm_all(table);
if (err)
goto arm_err;
- mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
return;
arm_err:
- mlx5_sf_dev_destroy_active_work(table);
+ mlx5_sf_dev_destroy_active_works(table);
add_active_err:
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+ mlx5_vhca_event_work_queues_flush(dev);
vhca_err:
- table->max_sfs = 0;
kfree(table);
dev->priv.sf_dev_table = NULL;
table_err:
@@ -365,9 +378,9 @@ void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
if (!table)
return;
- mlx5_sf_dev_destroy_active_work(table);
+ mlx5_sf_dev_destroy_active_works(table);
mlx5_vhca_event_notifier_unregister(dev, &table->nb);
- mutex_destroy(&table->table_lock);
+ mlx5_vhca_event_work_queues_flush(dev);
/* Now that event handler is not running, it is safe to destroy
* the sf device without race.
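The sf/dev.c rework above drops the table-wide mutex and instead queues one work item per active SF, hashed by function id onto a fixed set of event workqueues, so events for the same function stay ordered while different functions proceed in parallel. A minimal sketch of that queueing pattern; the example_* names are illustrative, not the mlx5 helpers:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define EXAMPLE_MAX_WQS 8

/* Assumed to be allocated elsewhere, e.g. with alloc_ordered_workqueue(). */
static struct workqueue_struct *example_wqs[EXAMPLE_MAX_WQS];

struct example_event_work {
	struct work_struct work;
	u16 function_id;
};

static void example_handle_event(struct work_struct *work)
{
	struct example_event_work *ev =
		container_of(work, struct example_event_work, work);

	/* ... handle the event for ev->function_id ... */
	kfree(ev);
}

static int example_queue_event(u16 function_id)
{
	struct example_event_work *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->function_id = function_id;
	INIT_WORK(&ev->work, example_handle_event);
	/* Events for the same function id always land on the same queue. */
	queue_work(example_wqs[function_id % EXAMPLE_MAX_WQS], &ev->work);
	return 0;
}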
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 2a66a427ef15..b99131e95e37 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -19,6 +19,12 @@ struct mlx5_sf_dev {
u16 fn_id;
};
+struct mlx5_sf_peer_devlink_event_ctx {
+ u16 fn_id;
+ struct devlink *devlink;
+ int err;
+};
+
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 8fe82f1191bb..169c2c68ed5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -8,6 +8,20 @@
#include "dev.h"
#include "devlink.h"
+static int mlx5_core_peer_devlink_set(struct mlx5_sf_dev *sf_dev, struct devlink *devlink)
+{
+ struct mlx5_sf_peer_devlink_event_ctx event_ctx = {
+ .fn_id = sf_dev->fn_id,
+ .devlink = devlink,
+ };
+ int ret;
+
+ ret = mlx5_blocking_notifier_call_chain(sf_dev->parent_mdev,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ &event_ctx);
+ return ret == NOTIFY_OK ? event_ctx.err : 0;
+}
+
static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
@@ -54,9 +68,21 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
+
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
devlink_register(devlink);
return 0;
+peer_devlink_set_err:
+ if (mlx5_dev_is_lightweight(sf_dev->mdev))
+ mlx5_uninit_one_light(sf_dev->mdev);
+ else
+ mlx5_uninit_one(sf_dev->mdev);
init_one_err:
iounmap(mdev->iseg);
remap_err:
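The driver.c hunk above passes a context struct through a blocking notifier chain so a listener can hand an error code back to the caller; the caller honours that error only when some listener actually handled the event (NOTIFY_OK). A minimal sketch of the same pattern using the generic notifier API; the example_* names are illustrative, while the mlx5 code goes through mlx5_blocking_notifier_call_chain:

#include <linux/notifier.h>
#include <linux/types.h>

struct example_peer_event_ctx {
	u16 fn_id;
	int err;		/* filled in by the listener */
};

static BLOCKING_NOTIFIER_HEAD(example_chain);

static int example_notify_peer(u16 fn_id)
{
	struct example_peer_event_ctx ctx = { .fn_id = fn_id };
	int ret;

	ret = blocking_notifier_call_chain(&example_chain, 0, &ctx);

	/* Only trust ctx.err if a listener actually consumed the event. */
	return ret == NOTIFY_OK ? ctx.err : 0;
}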
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index e34a8f88c518..6c11e075cab0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -20,43 +20,36 @@ struct mlx5_sf {
u16 hw_state;
};
+static void *mlx5_sf_by_dl_port(struct devlink_port *dl_port)
+{
+ struct mlx5_devlink_port *mlx5_dl_port = mlx5_devlink_port_get(dl_port);
+
+ return container_of(mlx5_dl_port, struct mlx5_sf, dl_port);
+}
+
struct mlx5_sf_table {
struct mlx5_core_dev *dev; /* To refer from notifier context. */
- struct xarray port_indices; /* port index based lookup. */
- refcount_t refcount;
- struct completion disable_complete;
+ struct xarray function_ids; /* function id based lookup. */
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
+ struct notifier_block mdev_nb;
};
static struct mlx5_sf *
-mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
-{
- return xa_load(&table->port_indices, port_index);
-}
-
-static struct mlx5_sf *
mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
{
- unsigned long index;
- struct mlx5_sf *sf;
-
- xa_for_each(&table->port_indices, index, sf) {
- if (sf->hw_fn_id == fn_id)
- return sf;
- }
- return NULL;
+ return xa_load(&table->function_ids, fn_id);
}
-static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+static int mlx5_sf_function_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
+ return xa_insert(&table->function_ids, sf->hw_fn_id, sf, GFP_KERNEL);
}
-static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+static void mlx5_sf_function_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- xa_erase(&table->port_indices, sf->port_index);
+ xa_erase(&table->function_ids, sf->hw_fn_id);
}
static struct mlx5_sf *
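The sf/devlink.c hunk above re-keys the SF xarray by hw function id, so the lookup path can do a direct xa_load() instead of the previous xa_for_each() scan. A minimal sketch of that xarray usage with illustrative example_* names:

#include <linux/types.h>
#include <linux/xarray.h>

struct example_sf {
	u16 fn_id;
};

static DEFINE_XARRAY(example_sfs);

static int example_sf_insert(struct example_sf *sf)
{
	/* Fails with -EBUSY if an entry already exists at this id. */
	return xa_insert(&example_sfs, sf->fn_id, sf, GFP_KERNEL);
}

static struct example_sf *example_sf_lookup(u16 fn_id)
{
	return xa_load(&example_sfs, fn_id);	/* direct lookup, no iteration */
}

static void example_sf_erase(struct example_sf *sf)
{
	xa_erase(&example_sfs, sf->fn_id);
}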
@@ -93,7 +86,7 @@ mlx5_sf_alloc(struct mlx5_sf_table *table, struct mlx5_eswitch *esw,
sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
sf->controller = controller;
- err = mlx5_sf_id_insert(table, sf);
+ err = mlx5_sf_function_id_insert(table, sf);
if (err)
goto insert_err;
@@ -111,28 +104,11 @@ id_err:
static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
- mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
kfree(sf);
}
-static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
-{
- struct mlx5_sf_table *table = dev->priv.sf_table;
-
- if (!table)
- return NULL;
-
- return refcount_inc_not_zero(&table->refcount) ? table : NULL;
-}
-
-static void mlx5_sf_table_put(struct mlx5_sf_table *table)
-{
- if (refcount_dec_and_test(&table->refcount))
- complete(&table->disable_complete);
-}
-
static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
{
switch (hw_state) {
@@ -172,26 +148,14 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err = 0;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table)
- return -EOPNOTSUPP;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -EOPNOTSUPP;
- goto sf_err;
- }
mutex_lock(&table->sf_state_lock);
*state = mlx5_sf_to_devlink_state(sf->hw_state);
*opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
mutex_unlock(&table->sf_state_lock);
-sf_err:
- mlx5_sf_table_put(table);
- return err;
+ return 0;
}
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
@@ -257,26 +221,10 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink);
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
- return -EOPNOTSUPP;
- }
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -ENODEV;
- goto out;
- }
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- err = mlx5_sf_state_set(dev, table, sf, state, extack);
-out:
- mlx5_sf_table_put(table);
- return err;
+ return mlx5_sf_state_set(dev, table, sf, state, extack);
}
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
@@ -335,32 +283,45 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
return 0;
}
+static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
+ mlx5_sf_hw_table_supported(dev);
+}
+
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *new_attr,
struct netlink_ext_ack *extack,
struct devlink_port **dl_port)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct mlx5_sf_table *table;
+ struct mlx5_sf_table *table = dev->priv.sf_table;
int err;
err = mlx5_sf_new_check_attr(dev, new_attr, extack);
if (err)
return err;
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
+ if (!mlx5_sf_table_supported(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "SF ports are not supported.");
+ return -EOPNOTSUPP;
+ }
+
+ if (!is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack,
- "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
+ "SF ports are only supported in eswitch switchdev mode.");
return -EOPNOTSUPP;
}
- err = mlx5_sf_add(dev, table, new_attr, extack, dl_port);
- mlx5_sf_table_put(table);
- return err;
+
+ return mlx5_sf_add(dev, table, new_attr, extack, dl_port);
}
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ mutex_lock(&table->sf_state_lock);
+
+ mlx5_sf_function_id_erase(table, sf);
+
if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
mlx5_sf_free(table, sf);
} else if (mlx5_sf_is_active(sf)) {
@@ -376,6 +337,16 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->controller, sf->id);
kfree(sf);
}
+
+ mutex_unlock(&table->sf_state_lock);
+}
+
+static void mlx5_sf_del(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ struct mlx5_eswitch *esw = table->dev->priv.eswitch;
+
+ mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
+ mlx5_sf_dealloc(table, sf);
}
int mlx5_devlink_sf_port_del(struct devlink *devlink,
@@ -383,32 +354,11 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- struct mlx5_eswitch *esw = dev->priv.eswitch;
- struct mlx5_sf_table *table;
- struct mlx5_sf *sf;
- int err = 0;
-
- table = mlx5_sf_table_try_get(dev);
- if (!table) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
- return -EOPNOTSUPP;
- }
- sf = mlx5_sf_lookup_by_index(table, dl_port->index);
- if (!sf) {
- err = -ENODEV;
- goto sf_err;
- }
-
- mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
- mlx5_sf_id_erase(table, sf);
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+ struct mlx5_sf *sf = mlx5_sf_by_dl_port(dl_port);
- mutex_lock(&table->sf_state_lock);
- mlx5_sf_dealloc(table, sf);
- mutex_unlock(&table->sf_state_lock);
-sf_err:
- mlx5_sf_table_put(table);
- return err;
+ mlx5_sf_del(table, sf);
+ return 0;
}
static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
@@ -433,14 +383,10 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
bool update = false;
struct mlx5_sf *sf;
- table = mlx5_sf_table_try_get(table->dev);
- if (!table)
- return 0;
-
mutex_lock(&table->sf_state_lock);
sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
if (!sf)
- goto sf_err;
+ goto unlock;
/* When driver is attached or detached to a function, an event
* notifies such state change.
@@ -450,46 +396,18 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
sf->hw_state = event->new_vhca_state;
trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
sf->hw_fn_id, sf->hw_state);
-sf_err:
+unlock:
mutex_unlock(&table->sf_state_lock);
- mlx5_sf_table_put(table);
return 0;
}
-static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
-{
- init_completion(&table->disable_complete);
- refcount_set(&table->refcount, 1);
-}
-
-static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
+static void mlx5_sf_del_all(struct mlx5_sf_table *table)
{
- struct mlx5_eswitch *esw = table->dev->priv.eswitch;
unsigned long index;
struct mlx5_sf *sf;
- /* At this point, no new user commands can start and no vhca event can
- * arrive. It is safe to destroy all user created SFs.
- */
- xa_for_each(&table->port_indices, index, sf) {
- mlx5_eswitch_unload_sf_vport(esw, sf->hw_fn_id);
- mlx5_sf_id_erase(table, sf);
- mlx5_sf_dealloc(table, sf);
- }
-}
-
-static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
-{
- if (!refcount_read(&table->refcount))
- return;
-
- /* Balances with refcount_set; drop the reference so that new user cmd cannot start
- * and new vhca event handler cannot run.
- */
- mlx5_sf_table_put(table);
- wait_for_completion(&table->disable_complete);
-
- mlx5_sf_deactivate_all(table);
+ xa_for_each(&table->function_ids, index, sf)
+ mlx5_sf_del(table, sf);
}
static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
@@ -498,11 +416,8 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
const struct mlx5_esw_event_info *mode = data;
switch (mode->new_mode) {
- case MLX5_ESWITCH_OFFLOADS:
- mlx5_sf_table_enable(table);
- break;
case MLX5_ESWITCH_LEGACY:
- mlx5_sf_table_disable(table);
+ mlx5_sf_del_all(table);
break;
default:
break;
@@ -511,10 +426,29 @@ static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, voi
return 0;
}
-static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+static int mlx5_sf_mdev_event(struct notifier_block *nb, unsigned long event, void *data)
{
- return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) &&
- mlx5_sf_hw_table_supported(dev);
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, mdev_nb);
+ struct mlx5_sf_peer_devlink_event_ctx *event_ctx = data;
+ int ret = NOTIFY_DONE;
+ struct mlx5_sf *sf;
+
+ if (event != MLX5_DRIVER_EVENT_SF_PEER_DEVLINK)
+ return NOTIFY_DONE;
+
+ mutex_lock(&table->sf_state_lock);
+ sf = mlx5_sf_lookup_by_function_id(table, event_ctx->fn_id);
+ if (!sf)
+ goto out;
+
+ event_ctx->err = devl_port_fn_devlink_set(&sf->dl_port.dl_port,
+ event_ctx->devlink);
+
+ ret = NOTIFY_OK;
+out:
+ mutex_unlock(&table->sf_state_lock);
+ return ret;
}
int mlx5_sf_table_init(struct mlx5_core_dev *dev)
@@ -531,9 +465,8 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
mutex_init(&table->sf_state_lock);
table->dev = dev;
- xa_init(&table->port_indices);
+ xa_init(&table->function_ids);
dev->priv.sf_table = table;
- refcount_set(&table->refcount, 0);
table->esw_nb.notifier_call = mlx5_sf_esw_event;
err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
if (err)
@@ -544,6 +477,9 @@ int mlx5_sf_table_init(struct mlx5_core_dev *dev)
if (err)
goto vhca_err;
+ table->mdev_nb.notifier_call = mlx5_sf_mdev_event;
+ mlx5_blocking_notifier_register(dev, &table->mdev_nb);
+
return 0;
vhca_err:
@@ -562,10 +498,10 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
if (!table)
return;
+ mlx5_blocking_notifier_unregister(dev, &table->mdev_nb);
mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
- WARN_ON(refcount_read(&table->refcount));
mutex_destroy(&table->sf_state_lock);
- WARN_ON(!xa_empty(&table->port_indices));
+ WARN_ON(!xa_empty(&table->function_ids));
kfree(table);
}
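A minimal stand-alone sketch of the lookup scheme used above — an xarray keyed by the hardware function id, plus container_of() to recover the parent structure from an embedded devlink-port member. All demo_* names are hypothetical and not part of the mlx5 driver:

#include <linux/container_of.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct demo_port { int dummy; };

struct demo_fn {
	u16 hw_fn_id;
	struct demo_port port;		/* embedded, like mlx5_sf::dl_port */
};

static DEFINE_XARRAY(demo_fn_ids);	/* function id based lookup */

/* Insert keyed by the hardware function id instead of the port index. */
static int demo_fn_insert(struct demo_fn *fn)
{
	return xa_insert(&demo_fn_ids, fn->hw_fn_id, fn, GFP_KERNEL);
}

/* O(1) lookup; replaces the old xa_for_each() scan over port indices. */
static struct demo_fn *demo_fn_lookup(u16 fn_id)
{
	return xa_load(&demo_fn_ids, fn_id);
}

/* Recover the parent object from a pointer to its embedded member. */
static struct demo_fn *demo_fn_by_port(struct demo_port *port)
{
	return container_of(port, struct demo_fn, port);
}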
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index d908fba968f0..cda01ba441ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -21,6 +21,15 @@ struct mlx5_vhca_event_work {
struct mlx5_vhca_state_event event;
};
+struct mlx5_vhca_event_handler {
+ struct workqueue_struct *wq;
+};
+
+struct mlx5_vhca_events {
+ struct mlx5_core_dev *dev;
+ struct mlx5_vhca_event_handler handler[MLX5_DEV_MAX_WQS];
+};
+
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
{
u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
@@ -99,6 +108,11 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
kfree(work);
}
+void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work)
+{
+ queue_work(dev->priv.vhca_events->handler[idx].wq, work);
+}
+
static int
mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
{
@@ -106,6 +120,7 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
struct mlx5_vhca_event_work *work;
struct mlx5_eqe *eqe = data;
+ int wq_idx;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
@@ -113,7 +128,8 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
work->notifier = notifier;
work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
- mlx5_events_work_enqueue(notifier->dev, &work->work);
+ wq_idx = work->event.function_id % MLX5_DEV_MAX_WQS;
+ mlx5_vhca_events_work_enqueue(notifier->dev, wq_idx, &work->work);
return NOTIFY_OK;
}
@@ -132,28 +148,75 @@ void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
{
struct mlx5_vhca_state_notifier *notifier;
+ char wq_name[MLX5_CMD_WQ_MAX_NAME];
+ struct mlx5_vhca_events *events;
+ int err, i;
if (!mlx5_vhca_event_supported(dev))
return 0;
- notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
- if (!notifier)
+ events = kzalloc(sizeof(*events), GFP_KERNEL);
+ if (!events)
return -ENOMEM;
+ events->dev = dev;
+ dev->priv.vhca_events = events;
+ for (i = 0; i < MLX5_DEV_MAX_WQS; i++) {
+ snprintf(wq_name, MLX5_CMD_WQ_MAX_NAME, "mlx5_vhca_event%d", i);
+ events->handler[i].wq = create_singlethread_workqueue(wq_name);
+ if (!events->handler[i].wq) {
+ err = -ENOMEM;
+ goto err_create_wq;
+ }
+ }
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier) {
+ err = -ENOMEM;
+ goto err_notifier;
+ }
+
dev->priv.vhca_state_notifier = notifier;
notifier->dev = dev;
BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
return 0;
+
+err_notifier:
+err_create_wq:
+ for (--i; i >= 0; i--)
+ destroy_workqueue(events->handler[i].wq);
+ kfree(events);
+ return err;
+}
+
+void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_events *vhca_events;
+ int i;
+
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ vhca_events = dev->priv.vhca_events;
+ for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
+ flush_workqueue(vhca_events->handler[i].wq);
}
void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
{
+ struct mlx5_vhca_events *vhca_events;
+ int i;
+
if (!mlx5_vhca_event_supported(dev))
return;
kfree(dev->priv.vhca_state_notifier);
dev->priv.vhca_state_notifier = NULL;
+ vhca_events = dev->priv.vhca_events;
+ for (i = 0; i < MLX5_DEV_MAX_WQS; i++)
+ destroy_workqueue(vhca_events->handler[i].wq);
+ kvfree(vhca_events);
}
void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
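The vhca_event.c hunks above spread state-change work across several single-threaded workqueues selected by function id. A minimal sketch of that pattern, assuming hypothetical demo_* names and an illustrative queue count (the driver uses MLX5_DEV_MAX_WQS):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DEMO_MAX_WQS 4	/* hypothetical; stands in for MLX5_DEV_MAX_WQS */

static struct workqueue_struct *demo_wq[DEMO_MAX_WQS];

static int demo_wqs_init(void)
{
	int i;

	for (i = 0; i < DEMO_MAX_WQS; i++) {
		demo_wq[i] = create_singlethread_workqueue("demo_vhca_event");
		if (!demo_wq[i])
			goto err_destroy;
	}
	return 0;

err_destroy:
	while (--i >= 0)
		destroy_workqueue(demo_wq[i]);
	return -ENOMEM;
}

/* Events for the same function id always land on the same ordered queue,
 * so per-function ordering is preserved while different function ids can
 * be processed in parallel.
 */
static void demo_enqueue(u16 function_id, struct work_struct *work)
{
	queue_work(demo_wq[function_id % DEMO_MAX_WQS], work);
}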
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
index 013cdfe90616..1725ba64f8af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -28,6 +28,9 @@ int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn
int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
u32 *out, u32 outlen);
+void mlx5_vhca_events_work_enqueue(struct mlx5_core_dev *dev, int idx, struct work_struct *work);
+void mlx5_vhca_event_work_queues_flush(struct mlx5_core_dev *dev);
+
#else
static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 5b83da08692d..6ea88a581804 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -55,6 +55,12 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
return action_type_to_str[action_id];
}
+static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
+}
+
static const enum dr_action_valid_state
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
[DR_ACTION_DOMAIN_NIC_INGRESS] = {
@@ -1163,12 +1169,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
bool ignore_flow_level,
u32 flow_source)
{
+ struct mlx5dr_cmd_flow_destination_hw_info tmp_hw_dest;
struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
struct mlx5dr_action **ref_actions;
struct mlx5dr_action *action;
bool reformat_req = false;
+ bool is_ft_wire = false;
+ u16 num_dst_ft = 0;
u32 num_of_ref = 0;
u32 ref_act_cnt;
+ u16 last_dest;
int ret;
int i;
@@ -1210,11 +1220,22 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
break;
case DR_ACTION_TYP_FT:
+ if (num_dst_ft &&
+ !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
+ mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
+ goto free_ref_actions;
+ }
+ num_dst_ft++;
hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- if (dest_action->dest_tbl->is_fw_tbl)
+ if (dest_action->dest_tbl->is_fw_tbl) {
hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
- else
+ } else {
hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
+ if (dest_action->dest_tbl->is_wire_ft) {
+ is_ft_wire = true;
+ last_dest = i;
+ }
+ }
break;
default:
@@ -1223,6 +1244,16 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
}
}
+ /* In multi-destination, the FW iterates over the destinations in the RX
+ * pipeline, except for the last one, which is handled in the TX pipeline.
+ * So, if one of the FT targets is the wire, put it at the end of the
+ * destination list.
+ */
+ if (is_ft_wire && num_dst_ft > 1) {
+ tmp_hw_dest = hw_dests[last_dest];
+ hw_dests[last_dest] = hw_dests[num_of_dests - 1];
+ hw_dests[num_of_dests - 1] = tmp_hw_dest;
+ }
+
action = dr_action_create_generic(DR_ACTION_TYP_FT);
if (!action)
goto free_ref_actions;
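The reordering added above amounts to swapping the wire flow-table destination with the last entry of the array. In isolation, with a hypothetical entry type and helper name, a sketch of the operation:

#include <linux/types.h>

struct demo_dest {
	u32 ft_id;
	bool is_wire;
};

/* Move the destination flagged as "wire" to the end of the array, since
 * only the last destination in the list is executed in the TX pipeline.
 */
static void demo_put_wire_last(struct demo_dest *dests, int num, int wire_idx)
{
	struct demo_dest tmp;

	if (num < 2 || wire_idx == num - 1)
		return;

	tmp = dests[wire_idx];
	dests[wire_idx] = dests[num - 1];
	dests[num - 1] = tmp;
}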
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 6c59de3e28f6..81eff6c410ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -436,10 +436,6 @@ void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
- struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
@@ -1064,6 +1060,7 @@ struct mlx5dr_action_sampler {
struct mlx5dr_action_dest_tbl {
u8 is_fw_tbl:1;
+ u8 is_wire_ft:1;
union {
struct mlx5dr_table *tbl;
struct {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 14f6df88b1f9..50c2554c9ccf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -209,10 +209,17 @@ static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
struct mlx5_flow_rule *dst)
{
struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
+ struct mlx5dr_action *tbl_action;
if (mlx5dr_is_fw_table(dest_ft))
return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
- return mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+
+ tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+ if (tbl_action)
+ tbl_action->dest_tbl->is_wire_ft =
+ dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
+
+ return tbl_action;
}
static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 694de9513b9f..954ba0826c61 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -471,7 +471,7 @@ out:
return err;
}
-static int mlxbf_gige_remove(struct platform_device *pdev)
+static void mlxbf_gige_remove(struct platform_device *pdev)
{
struct mlxbf_gige *priv = platform_get_drvdata(pdev);
@@ -479,8 +479,6 @@ static int mlxbf_gige_remove(struct platform_device *pdev)
phy_disconnect(priv->netdev->phydev);
mlxbf_gige_mdio_remove(priv);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static void mlxbf_gige_shutdown(struct platform_device *pdev)
@@ -499,7 +497,7 @@ MODULE_DEVICE_TABLE(acpi, mlxbf_gige_acpi_match);
static struct platform_driver mlxbf_gige_driver = {
.probe = mlxbf_gige_probe,
- .remove = mlxbf_gige_remove,
+ .remove_new = mlxbf_gige_remove,
.shutdown = mlxbf_gige_shutdown,
.driver = {
.name = KBUILD_MODNAME,
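The mlxbf_gige hunk converts the platform driver to the void-returning remove callback (.remove_new). A bare-bones sketch of that convention, with hypothetical names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

/* .remove_new callbacks return void: the return value of the old
 * .remove callback was ignored by the platform bus core anyway.
 */
static void demo_remove(struct platform_device *pdev)
{
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo",
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");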
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index 09bef04b11d1..e827c78be114 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -276,6 +276,12 @@ MLXSW_ITEM32(cmd_mbox, query_fw, fw_month, 0x14, 8, 8);
*/
MLXSW_ITEM32(cmd_mbox, query_fw, fw_day, 0x14, 0, 8);
+/* cmd_mbox_query_fw_lag_mode_support
+ * 0: CONFIG_PROFILE.lag_mode is not supported by FW
+ * 1: CONFIG_PROFILE.lag_mode is supported by FW
+ */
+MLXSW_ITEM32(cmd_mbox, query_fw, lag_mode_support, 0x18, 1, 1);
+
/* cmd_mbox_query_fw_clr_int_base_offset
* Clear Interrupt register's offset from clr_int_bar register
* in PCI address space.
@@ -659,42 +665,48 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
-/* cmd_mbox_config_set_ubridge
+/* cmd_mbox_config_profile_set_ubridge
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ubridge, 0x0C, 22, 1);
-/* cmd_mbox_config_set_kvd_linear_size
+/* cmd_mbox_config_profile_set_kvd_linear_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);
-/* cmd_mbox_config_set_kvd_hash_single_size
+/* cmd_mbox_config_profile_set_kvd_hash_single_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
-/* cmd_mbox_config_set_kvd_hash_double_size
+/* cmd_mbox_config_profile_set_kvd_hash_double_size
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
-/* cmd_mbox_config_set_cqe_version
+/* cmd_mbox_config_profile_set_cqe_version
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
-/* cmd_mbox_config_set_cqe_time_stamp_type
+/* cmd_mbox_config_profile_set_cqe_time_stamp_type
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_time_stamp_type, 0x08, 2, 1);
+/* cmd_mbox_config_profile_set_lag_mode
+ * Capability bit. Setting a bit to 1 configures the lag_mode
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_lag_mode, 0x08, 7, 1);
+
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
* 0 - multi-channel VEPA is disabled
@@ -840,6 +852,21 @@ MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
+enum mlxsw_cmd_mbox_config_profile_lag_mode {
+ /* FW manages PGT LAG table */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW,
+ /* SW manages PGT LAG table */
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW,
+};
+
+/* cmd_mbox_config_profile_lag_mode
+ * LAG mode
+ * Configured if set_lag_mode is set
+ * Supported from Spectrum-2 and above.
+ * Supported only when ubridge = 1
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, lag_mode, 0x50, 3, 1);
+
/* cmd_mbox_config_kvd_linear_size
* KVD Linear Size
* Valid for Spectrum only
@@ -847,7 +874,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, ubridge, 0x50, 4, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
-/* cmd_mbox_config_kvd_hash_single_size
+/* cmd_mbox_config_profile_kvd_hash_single_size
* KVD Hash single-entries size
* Valid for Spectrum only
* Allowed values are 128*N where N=0 or higher
@@ -856,7 +883,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);
-/* cmd_mbox_config_kvd_hash_double_size
+/* cmd_mbox_config_profile_kvd_hash_double_size
* KVD Hash double-entries size (units of single-size entries)
* Valid for Spectrum only
* Allowed values are 128*N where N=0 or higher
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 1ccf3b73ed72..f23421f038f3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -204,6 +204,13 @@ int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag)
}
EXPORT_SYMBOL(mlxsw_core_max_lag);
+enum mlxsw_cmd_mbox_config_profile_lag_mode
+mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->bus->lag_mode(mlxsw_core->bus_priv);
+}
+EXPORT_SYMBOL(mlxsw_core_lag_mode);
+
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
return mlxsw_core->driver_priv;
@@ -1792,122 +1799,78 @@ static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
static const struct mlxsw_listener mlxsw_core_health_listener =
MLXSW_CORE_EVENTL(mlxsw_core_health_listener_func, MFDE);
-static int
+static void
mlxsw_core_health_fw_fatal_dump_fatal_cause(const char *mfde_pl,
struct devlink_fmsg *fmsg)
{
u32 val, tile_v;
- int err;
val = mlxsw_reg_mfde_fatal_cause_id_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "cause_id", val);
tile_v = mlxsw_reg_mfde_fatal_cause_tile_v_get(mfde_pl);
if (tile_v) {
val = mlxsw_reg_mfde_fatal_cause_tile_index_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
}
-
- return 0;
}
-static int
+static void
mlxsw_core_health_fw_fatal_dump_fw_assert(const char *mfde_pl,
struct devlink_fmsg *fmsg)
{
u32 val, tile_v;
- int err;
val = mlxsw_reg_mfde_fw_assert_var0_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "var0", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "var0", val);
val = mlxsw_reg_mfde_fw_assert_var1_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "var1", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "var1", val);
val = mlxsw_reg_mfde_fw_assert_var2_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "var2", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "var2", val);
val = mlxsw_reg_mfde_fw_assert_var3_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "var3", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "var3", val);
val = mlxsw_reg_mfde_fw_assert_var4_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "var4", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "var4", val);
val = mlxsw_reg_mfde_fw_assert_existptr_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "existptr", val);
val = mlxsw_reg_mfde_fw_assert_callra_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "callra", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "callra", val);
val = mlxsw_reg_mfde_fw_assert_oe_get(mfde_pl);
- err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
- if (err)
- return err;
+ devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
tile_v = mlxsw_reg_mfde_fw_assert_tile_v_get(mfde_pl);
if (tile_v) {
val = mlxsw_reg_mfde_fw_assert_tile_index_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "tile_index", val);
}
val = mlxsw_reg_mfde_fw_assert_ext_synd_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_u32_pair_put(fmsg, "ext_synd", val);
}
-static int
+static void
mlxsw_core_health_fw_fatal_dump_kvd_im_stop(const char *mfde_pl,
struct devlink_fmsg *fmsg)
{
u32 val;
- int err;
val = mlxsw_reg_mfde_kvd_im_stop_oe_get(mfde_pl);
- err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
- if (err)
- return err;
+ devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
val = mlxsw_reg_mfde_kvd_im_stop_pipes_mask_get(mfde_pl);
- return devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+ devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
}
-static int
+static void
mlxsw_core_health_fw_fatal_dump_crspace_to(const char *mfde_pl,
struct devlink_fmsg *fmsg)
{
u32 val;
- int err;
val = mlxsw_reg_mfde_crspace_to_log_address_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
val = mlxsw_reg_mfde_crspace_to_oe_get(mfde_pl);
- err = devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
- if (err)
- return err;
+ devlink_fmsg_bool_pair_put(fmsg, "old_event", val);
val = mlxsw_reg_mfde_crspace_to_log_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
val = mlxsw_reg_mfde_crspace_to_log_ip_get(mfde_pl);
- err = devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
- if (err)
- return err;
-
- return 0;
+ devlink_fmsg_u64_pair_put(fmsg, "log_ip", val);
}
static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
@@ -1918,24 +1881,17 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
char *val_str;
u8 event_id;
u32 val;
- int err;
if (!priv_ctx)
/* User-triggered dumps are not possible */
return -EOPNOTSUPP;
val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
- if (err)
- return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "event");
event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "id", event_id);
switch (event_id) {
case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
val_str = "CR space timeout";
@@ -1955,24 +1911,13 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
default:
val_str = NULL;
}
- if (val_str) {
- err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
- if (err)
- return err;
- }
-
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
- if (err)
- return err;
+ if (val_str)
+ devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
+ devlink_fmsg_arr_pair_nest_start(fmsg, "severity");
val = mlxsw_reg_mfde_severity_get(mfde_pl);
- err = devlink_fmsg_u8_pair_put(fmsg, "id", val);
- if (err)
- return err;
+ devlink_fmsg_u8_pair_put(fmsg, "id", val);
switch (val) {
case MLXSW_REG_MFDE_SEVERITY_FATL:
val_str = "Fatal";
@@ -1986,15 +1931,9 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
default:
val_str = NULL;
}
- if (val_str) {
- err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
- if (err)
- return err;
- }
-
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
+ if (val_str)
+ devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ devlink_fmsg_arr_pair_nest_end(fmsg);
val = mlxsw_reg_mfde_method_get(mfde_pl);
switch (val) {
@@ -2007,16 +1946,11 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
default:
val_str = NULL;
}
- if (val_str) {
- err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
- if (err)
- return err;
- }
+ if (val_str)
+ devlink_fmsg_string_pair_put(fmsg, "method", val_str);
val = mlxsw_reg_mfde_long_process_get(mfde_pl);
- err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
- if (err)
- return err;
+ devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
val = mlxsw_reg_mfde_command_type_get(mfde_pl);
switch (val) {
@@ -2032,29 +1966,25 @@ static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *repor
default:
val_str = NULL;
}
- if (val_str) {
- err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
- if (err)
- return err;
- }
+ if (val_str)
+ devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
- err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
- if (err)
- return err;
+ devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
switch (event_id) {
case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
- return mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl,
- fmsg);
+ mlxsw_core_health_fw_fatal_dump_crspace_to(mfde_pl, fmsg);
+ break;
case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
- return mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl,
- fmsg);
+ mlxsw_core_health_fw_fatal_dump_kvd_im_stop(mfde_pl, fmsg);
+ break;
case MLXSW_REG_MFDE_EVENT_ID_FW_ASSERT:
- return mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
+ mlxsw_core_health_fw_fatal_dump_fw_assert(mfde_pl, fmsg);
+ break;
case MLXSW_REG_MFDE_EVENT_ID_FATAL_CAUSE:
- return mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl,
- fmsg);
+ mlxsw_core_health_fw_fatal_dump_fatal_cause(mfde_pl, fmsg);
+ break;
}
return 0;
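The mlxsw/core.c hunks above drop the per-call error checks because the devlink fmsg helpers now return void; formatting errors are latched inside the fmsg and reported once at the end. A hedged sketch of a health dump callback written in this style (reporter registration omitted, names hypothetical):

#include <net/devlink.h>

static int demo_health_dump(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg, void *priv_ctx,
			    struct netlink_ext_ack *extack)
{
	/* No error to propagate per put; just emit the pairs. */
	devlink_fmsg_u32_pair_put(fmsg, "cause_id", 0x10);
	devlink_fmsg_bool_pair_put(fmsg, "old_event", false);
	devlink_fmsg_string_pair_put(fmsg, "desc", "example");
	return 0;
}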
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index e5474d3e34db..764d14bd5bc0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -36,6 +36,8 @@ struct mlxsw_fw_rev;
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_max_lag(struct mlxsw_core *mlxsw_core, u16 *p_max_lag);
+enum mlxsw_cmd_mbox_config_profile_lag_mode
+mlxsw_core_lag_mode(struct mlxsw_core *mlxsw_core);
void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
@@ -335,6 +337,7 @@ struct mlxsw_config_profile {
u8 kvd_hash_single_parts;
u8 kvd_hash_double_parts;
u8 cqe_time_stamp_type;
+ bool lag_mode_prefer_sw;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
@@ -485,6 +488,7 @@ struct mlxsw_bus {
u32 (*read_frc_l)(void *bus_priv);
u32 (*read_utc_sec)(void *bus_priv);
u32 (*read_utc_nsec)(void *bus_priv);
+ enum mlxsw_cmd_mbox_config_profile_lag_mode (*lag_mode)(void *bus_priv);
u8 features;
};
@@ -624,7 +628,7 @@ struct mlxsw_linecards {
struct mlxsw_linecard_types_info *types_info;
struct list_head event_ops_list;
struct mutex event_ops_list_lock; /* Locks accesses to event ops list */
- struct mlxsw_linecard linecards[];
+ struct mlxsw_linecard linecards[] __counted_by(count);
};
static inline struct mlxsw_linecard *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 70f9b5e85a26..0d5e6f9b466e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -32,8 +32,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8),
MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2),
MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4),
- MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER, 0x18, 17, 12),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4),
@@ -44,6 +43,9 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1),
MLXSW_AFK_ELEMENT_INFO_U32(L4_PORT_RANGE, 0x40, 1, 16),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_3, 0x40, 17, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_4_7, 0x40, 21, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x40, 25, 4),
};
struct mlxsw_afk {
@@ -136,6 +138,7 @@ mlxsw_afk_key_info_find(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_picker {
DECLARE_BITMAP(element, MLXSW_AFK_ELEMENT_MAX);
+ DECLARE_BITMAP(chosen_element, MLXSW_AFK_ELEMENT_MAX);
unsigned int total;
};
@@ -206,7 +209,7 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
if (key_info->blocks_count == mlxsw_afk->max_blocks)
return -EINVAL;
- for_each_set_bit(element, picker[block_index].element,
+ for_each_set_bit(element, picker[block_index].chosen_element,
MLXSW_AFK_ELEMENT_MAX) {
key_info->element_to_block[element] = key_info->blocks_count;
mlxsw_afk_element_usage_add(&key_info->elusage, element);
@@ -218,11 +221,43 @@ static int mlxsw_afk_picker_key_info_add(struct mlxsw_afk *mlxsw_afk,
return 0;
}
+static int mlxsw_afk_keys_fill(struct mlxsw_afk *mlxsw_afk,
+ unsigned long *chosen_blocks_bm,
+ struct mlxsw_afk_picker *picker,
+ struct mlxsw_afk_key_info *key_info)
+{
+ int i, err;
+
+ /* First fill only key blocks with high_entropy. */
+ for_each_set_bit(i, chosen_blocks_bm, mlxsw_afk->blocks_count) {
+ if (!mlxsw_afk->blocks[i].high_entropy)
+ continue;
+
+ err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, i,
+ key_info);
+ if (err)
+ return err;
+ __clear_bit(i, chosen_blocks_bm);
+ }
+
+ /* Fill the rest of key blocks. */
+ for_each_set_bit(i, chosen_blocks_bm, mlxsw_afk->blocks_count) {
+ err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker, i,
+ key_info);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_usage *elusage)
{
+ DECLARE_BITMAP(elusage_chosen, MLXSW_AFK_ELEMENT_MAX) = {0};
struct mlxsw_afk_picker *picker;
+ unsigned long *chosen_blocks_bm;
enum mlxsw_afk_element element;
int err;
@@ -230,6 +265,12 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
if (!picker)
return -ENOMEM;
+ chosen_blocks_bm = bitmap_zalloc(mlxsw_afk->blocks_count, GFP_KERNEL);
+ if (!chosen_blocks_bm) {
+ err = -ENOMEM;
+ goto err_bitmap_alloc;
+ }
+
/* Since the same elements could be present in multiple blocks,
* we must find out optimal block list in order to make the
* block count as low as possible.
@@ -254,15 +295,26 @@ static int mlxsw_afk_picker(struct mlxsw_afk *mlxsw_afk,
err = block_index;
goto out;
}
- err = mlxsw_afk_picker_key_info_add(mlxsw_afk, picker,
- block_index, key_info);
- if (err)
- goto out;
+
+ __set_bit(block_index, chosen_blocks_bm);
+
+ bitmap_copy(picker[block_index].chosen_element,
+ picker[block_index].element, MLXSW_AFK_ELEMENT_MAX);
+
+ bitmap_or(elusage_chosen, elusage_chosen,
+ picker[block_index].chosen_element,
+ MLXSW_AFK_ELEMENT_MAX);
+
mlxsw_afk_picker_subtract_hits(mlxsw_afk, picker, block_index);
- } while (!mlxsw_afk_key_info_elements_eq(key_info, elusage));
- err = 0;
+ } while (!bitmap_equal(elusage_chosen, elusage->usage,
+ MLXSW_AFK_ELEMENT_MAX));
+
+ err = mlxsw_afk_keys_fill(mlxsw_afk, chosen_blocks_bm, picker,
+ key_info);
out:
+ bitmap_free(chosen_blocks_bm);
+err_bitmap_alloc:
kfree(picker);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 2eac7582c31a..98a05598178b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -33,10 +33,12 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_TTL_,
MLXSW_AFK_ELEMENT_IP_ECN,
MLXSW_AFK_ELEMENT_IP_DSCP,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER,
MLXSW_AFK_ELEMENT_FDB_MISS,
MLXSW_AFK_ELEMENT_L4_PORT_RANGE,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -117,6 +119,7 @@ struct mlxsw_afk_block {
u16 encoding; /* block ID */
struct mlxsw_afk_element_inst *instances;
unsigned int instances_count;
+ bool high_entropy;
};
#define MLXSW_AFK_BLOCK(_encoding, _instances) \
@@ -126,6 +129,14 @@ struct mlxsw_afk_block {
.instances_count = ARRAY_SIZE(_instances), \
}
+#define MLXSW_AFK_BLOCK_HIGH_ENTROPY(_encoding, _instances) \
+ { \
+ .encoding = _encoding, \
+ .instances = _instances, \
+ .instances_count = ARRAY_SIZE(_instances), \
+ .high_entropy = true, \
+ }
+
struct mlxsw_afk_element_usage {
DECLARE_BITMAP(usage, MLXSW_AFK_ELEMENT_MAX);
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index d637c0348fa1..53b150b7ae4e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -34,7 +34,7 @@ struct mlxsw_env {
u8 num_of_slots; /* Including the main board. */
u8 max_eeprom_len; /* Maximum module EEPROM transaction length. */
struct mutex line_cards_lock; /* Protects line cards. */
- struct mlxsw_env_line_card *line_cards[];
+ struct mlxsw_env_line_card *line_cards[] __counted_by(num_of_slots);
};
static bool __mlxsw_env_linecard_is_active(struct mlxsw_env *mlxsw_env,
@@ -775,7 +775,7 @@ static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
int err;
mlxsw_reg_mtbr_pack(mtbr_pl, slot_index,
- MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 0fd290d776ff..9c12e1feb643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -293,7 +293,7 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
module = mlxsw_hwmon_attr->type_index - mlxsw_hwmon_dev->sensor_count;
mlxsw_reg_mtbr_pack(mtbr_pl, mlxsw_hwmon_dev->slot_index,
- MLXSW_REG_MTBR_BASE_MODULE_INDEX + module, 1);
+ MLXSW_REG_MTBR_BASE_MODULE_INDEX + module);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtbr), mtbr_pl);
if (err) {
dev_err(dev, "Failed to query module temperature sensor\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
index af37e650a8ad..e8d6fe35bf36 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecard_dev.c
@@ -132,6 +132,7 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
struct mlxsw_linecard *linecard = linecard_bdev->linecard;
struct mlxsw_linecard_dev *linecard_dev;
struct devlink *devlink;
+ int err;
devlink = devlink_alloc(&mlxsw_linecard_dev_devlink_ops,
sizeof(*linecard_dev), &adev->dev);
@@ -141,8 +142,12 @@ static int mlxsw_linecard_bdev_probe(struct auxiliary_device *adev,
linecard_dev->linecard = linecard_bdev->linecard;
linecard_bdev->linecard_dev = linecard_dev;
+ err = devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
+ if (err) {
+ devlink_free(devlink);
+ return err;
+ }
devlink_register(devlink);
- devlink_linecard_nested_dl_set(linecard->devlink_linecard, devlink);
return 0;
}
@@ -151,9 +156,7 @@ static void mlxsw_linecard_bdev_remove(struct auxiliary_device *adev)
struct mlxsw_linecard_bdev *linecard_bdev =
container_of(adev, struct mlxsw_linecard_bdev, adev);
struct devlink *devlink = priv_to_devlink(linecard_bdev->linecard_dev);
- struct mlxsw_linecard *linecard = linecard_bdev->linecard;
- devlink_linecard_nested_dl_set(linecard->devlink_linecard, NULL);
devlink_unregister(devlink);
devlink_free(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 70d7fff24fa2..f1b48d6615f6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -31,6 +31,7 @@
/* External cooling devices, allowed for binding to mlxsw thermal zones. */
static char * const mlxsw_thermal_external_allowed_cdev[] = {
"mlxreg_fan",
+ "emc2305",
};
struct mlxsw_cooling_states {
@@ -535,7 +536,7 @@ mlxsw_thermal_modules_fini(struct mlxsw_thermal *thermal,
static int
mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
{
- char tz_name[THERMAL_NAME_LENGTH];
+ char tz_name[40];
int ret;
if (gearbox_tz->slot_index)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index d23f293e285c..1e150ce1c73a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -424,9 +424,7 @@ mlxsw_i2c_cmd(struct device *dev, u16 opcode, u32 in_mod, size_t in_mbox_size,
if (in_mbox) {
reg_size = mlxsw_i2c_get_reg_size(in_mbox);
- num = reg_size / mlxsw_i2c->block_size;
- if (reg_size % mlxsw_i2c->block_size)
- num++;
+ num = DIV_ROUND_UP(reg_size, mlxsw_i2c->block_size);
if (mutex_lock_interruptible(&mlxsw_i2c->cmd.lock) < 0) {
dev_err(&client->dev, "Could not acquire lock");
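For reference, DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d), so a 49-byte register split into 32-byte I2C blocks needs DIV_ROUND_UP(49, 32) = 2 transfers — the same result the replaced divide-and-remainder check produced.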
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 51eea1f0529c..e4b25e187467 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -105,6 +105,8 @@ struct mlxsw_pci {
u64 free_running_clock_offset;
u64 utc_sec_offset;
u64 utc_nsec_offset;
+ bool lag_mode_support;
+ enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -352,14 +354,15 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info)
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ gfp_t gfp)
{
size_t buf_len = MLXSW_PORT_MAX_MTU;
char *wqe = elem_info->elem;
struct sk_buff *skb;
int err;
- skb = netdev_alloc_skb_ip_align(NULL, buf_len);
+ skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
if (!skb)
return -ENOMEM;
@@ -420,7 +423,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
BUG_ON(!elem_info);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
if (err)
goto rollback;
/* Everything is set up, ring doorbell to pass elem to HW */
@@ -640,7 +643,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
if (err) {
dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
goto out;
@@ -1311,6 +1314,16 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
profile->cqe_time_stamp_type);
}
+ if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
+ enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;
+
+ mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
+ mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
+ mlxsw_pci->lag_mode = lag_mode;
+ } else {
+ mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
+ }
return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}
@@ -1586,6 +1599,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
mlxsw_pci->utc_nsec_offset =
mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);
+ mlxsw_pci->lag_mode_support =
+ mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
if (err)
@@ -1618,9 +1633,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_config_profile;
- /* Some resources depend on unified bridge model, which is configured
- * as part of config_profile. Query the resources again to get correct
- * values.
+ /* Some resources depend on details of config_profile, such as unified
+ * bridge model. Query the resources again to get correct values.
*/
err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
if (err)
@@ -1895,6 +1909,14 @@ static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}
+static enum mlxsw_cmd_mbox_config_profile_lag_mode
+mlxsw_pci_lag_mode(void *bus_priv)
+{
+ struct mlxsw_pci *mlxsw_pci = bus_priv;
+
+ return mlxsw_pci->lag_mode;
+}
+
static const struct mlxsw_bus mlxsw_pci_bus = {
.kind = "pci",
.init = mlxsw_pci_init,
@@ -1906,6 +1928,7 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.read_frc_l = mlxsw_pci_read_frc_l,
.read_utc_sec = mlxsw_pci_read_utc_sec,
.read_utc_nsec = mlxsw_pci_read_utc_nsec,
+ .lag_mode = mlxsw_pci_lag_mode,
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ae556ddd7624..25b294fdeb3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -38,18 +38,18 @@ static const struct mlxsw_reg_info mlxsw_reg_##_name = { \
MLXSW_REG_DEFINE(sgcr, MLXSW_REG_SGCR_ID, MLXSW_REG_SGCR_LEN);
-/* reg_sgcr_llb
- * Link Local Broadcast (Default=0)
- * When set, all Link Local packets (224.0.0.X) will be treated as broadcast
- * packets and ignore the IGMP snooping entries.
+/* reg_sgcr_lag_lookup_pgt_base
+ * Base address used for lookup in PGT table
+ * Supported when CONFIG_PROFILE.lag_mode = 1
+ * Note: when IGCR.ddd_lag_mode=0, the address shall be aligned to 8 entries.
* Access: RW
*/
-MLXSW_ITEM32(reg, sgcr, llb, 0x04, 0, 1);
+MLXSW_ITEM32(reg, sgcr, lag_lookup_pgt_base, 0x0C, 0, 16);
-static inline void mlxsw_reg_sgcr_pack(char *payload, bool llb)
+static inline void mlxsw_reg_sgcr_pack(char *payload, u16 lag_lookup_pgt_base)
{
MLXSW_REG_ZERO(sgcr, payload);
- mlxsw_reg_sgcr_llb_set(payload, !!llb);
+ mlxsw_reg_sgcr_lag_lookup_pgt_base_set(payload, lag_lookup_pgt_base);
}
/* SPAD - Switch Physical Address Register
@@ -9551,7 +9551,7 @@ MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
#define MLXSW_REG_MTBR_ID 0x900F
#define MLXSW_REG_MTBR_BASE_LEN 0x10 /* base length, without records */
#define MLXSW_REG_MTBR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_MTBR_REC_MAX_COUNT 47 /* firmware limitation */
+#define MLXSW_REG_MTBR_REC_MAX_COUNT 1
#define MLXSW_REG_MTBR_LEN (MLXSW_REG_MTBR_BASE_LEN + \
MLXSW_REG_MTBR_REC_LEN * \
MLXSW_REG_MTBR_REC_MAX_COUNT)
@@ -9597,12 +9597,12 @@ MLXSW_ITEM32_INDEXED(reg, mtbr, rec_temp, MLXSW_REG_MTBR_BASE_LEN, 0, 16,
MLXSW_REG_MTBR_REC_LEN, 0x00, false);
static inline void mlxsw_reg_mtbr_pack(char *payload, u8 slot_index,
- u16 base_sensor_index, u8 num_rec)
+ u16 base_sensor_index)
{
MLXSW_REG_ZERO(mtbr, payload);
mlxsw_reg_mtbr_slot_index_set(payload, slot_index);
mlxsw_reg_mtbr_base_sensor_index_set(payload, base_sensor_index);
- mlxsw_reg_mtbr_num_rec_set(payload, num_rec);
+ mlxsw_reg_mtbr_num_rec_set(payload, 1);
}
/* Error codes from temperature reading */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9dbd5edff0b0..cec72d99d9c9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2692,6 +2692,63 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->trap);
}
+static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char sgcr_pl[MLXSW_REG_SGCR_LEN];
+ u16 max_lag;
+ int err;
+
+ if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
+ return 0;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return err;
+
+ /* In DDD mode, which we use by default, each LAG entry is 8 PGT
+ * entries. The LAG table address needs to be 8-aligned, but that ought
+ * to be the case, since the LAG table is allocated first.
+ */
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
+ max_lag * 8);
+ if (err)
+ return err;
+ if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
+ err = -EINVAL;
+ goto err_mid_alloc_range;
+ }
+
+ mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
+ if (err)
+ goto err_mid_alloc_range;
+
+ return 0;
+
+err_mid_alloc_range:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
+ max_lag * 8);
+ return err;
+}
+
+static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ u16 max_lag;
+ int err;
+
+ if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
+ MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
+ return;
+
+ err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
+ if (err)
+ return;
+
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
+ max_lag * 8);
+}
+
#define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
@@ -2723,16 +2780,27 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
return -EIO;
+ err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
+ if (err)
+ return err;
+
mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
GFP_KERNEL);
- if (!mlxsw_sp->lags)
- return -ENOMEM;
+ if (!mlxsw_sp->lags) {
+ err = -ENOMEM;
+ goto err_kcalloc;
+ }
return 0;
+
+err_kcalloc:
+ mlxsw_sp_lag_pgt_fini(mlxsw_sp);
+ return err;
}
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
+ mlxsw_sp_lag_pgt_fini(mlxsw_sp);
kfree(mlxsw_sp->lags);
}
@@ -3113,6 +3181,15 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_pgt_init;
}
+ /* Initialize before FIDs so that the LAG table is at the start of PGT
+ * and 8-aligned without overallocation.
+ */
+ err = mlxsw_sp_lag_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
+ goto err_lag_init;
+ }
+
err = mlxsw_sp_fids_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
@@ -3143,12 +3220,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_buffers_init;
}
- err = mlxsw_sp_lag_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
- goto err_lag_init;
- }
-
/* Initialize SPAN before router and switchdev, so that those components
* can call mlxsw_sp_span_respin().
*/
@@ -3300,8 +3371,6 @@ err_counter_pool_init:
err_switchdev_init:
mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
- mlxsw_sp_lag_fini(mlxsw_sp);
-err_lag_init:
mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
mlxsw_sp_devlink_traps_fini(mlxsw_sp);
@@ -3312,6 +3381,8 @@ err_traps_init:
err_policers_init:
mlxsw_sp_fids_fini(mlxsw_sp);
err_fids_init:
+ mlxsw_sp_lag_fini(mlxsw_sp);
+err_lag_init:
mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
mlxsw_sp_kvdl_fini(mlxsw_sp);
@@ -3477,12 +3548,12 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
mlxsw_sp_span_fini(mlxsw_sp);
- mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_devlink_traps_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_policers_fini(mlxsw_sp);
mlxsw_sp_fids_fini(mlxsw_sp);
+ mlxsw_sp_lag_fini(mlxsw_sp);
mlxsw_sp_pgt_fini(mlxsw_sp);
mlxsw_sp_kvdl_fini(mlxsw_sp);
mlxsw_sp_parsing_fini(mlxsw_sp);
@@ -3526,6 +3597,7 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
},
.used_cqe_time_stamp_type = 1,
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+ .lag_mode_prefer_sw = true,
};
/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
@@ -3553,6 +3625,7 @@ static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
},
.used_cqe_time_stamp_type = 1,
.cqe_time_stamp_type = MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
+ .lag_mode_prefer_sw = true,
};
static void
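The spectrum.c comment above sizes the PGT allocation as max_lag * 8 because, in DDD mode, each LAG occupies 8 consecutive PGT entries and the base address must be 8-aligned. A small hypothetical helper illustrating the arithmetic (not part of the driver):

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_PGT_ENTRIES_PER_LAG 8	/* DDD mode: 8 PGT entries per LAG */

/* e.g. max_lag = 128 -> a 1024-entry PGT range whose base is a multiple of 8 */
static int demo_lag_pgt_range(u16 max_lag, u16 pgt_base)
{
	if (pgt_base % DEMO_PGT_ENTRIES_PER_LAG)
		return -EINVAL;		/* base must be 8-aligned */

	return max_lag * DEMO_PGT_ENTRIES_PER_LAG;
}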
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 02ca2871b6f9..c70333b460ea 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -212,6 +212,7 @@ struct mlxsw_sp {
struct mutex ipv6_addr_ht_lock; /* Protects ipv6_addr_ht */
struct mlxsw_sp_pgt *pgt;
bool pgt_smpe_index_valid;
+ u16 lag_pgt_base;
};
struct mlxsw_sp_ptp_ops {
@@ -1480,7 +1481,7 @@ int mlxsw_sp_policer_resources_register(struct mlxsw_core *mlxsw_core);
/* spectrum_pgt.c */
int mlxsw_sp_pgt_mid_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_mid);
void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base);
-int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *mid_base,
u16 count);
void mlxsw_sp_pgt_mid_free_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base,
u16 count);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
index b1178b7a7f51..99eeafdc8d1e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c
@@ -45,8 +45,7 @@ static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp,
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = {
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER,
MLXSW_AFK_ELEMENT_SRC_IP_0_31,
MLXSW_AFK_ELEMENT_DST_IP_0_31,
};
@@ -89,8 +88,9 @@ static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam)
}
static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = {
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
MLXSW_AFK_ELEMENT_SRC_IP_96_127,
MLXSW_AFK_ELEMENT_SRC_IP_64_95,
MLXSW_AFK_ELEMENT_SRC_IP_32_63,
@@ -142,6 +142,8 @@ static void
mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_mr_route_key *key)
{
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER,
+ key->vrid, GENMASK(11, 0));
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
(char *) &key->source.addr4,
(char *) &key->source_mask.addr4, 4);
@@ -154,6 +156,13 @@ static void
mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei,
struct mlxsw_sp_mr_route_key *key)
{
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_3,
+ key->vrid, GENMASK(3, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_4_7,
+ key->vrid >> 4, GENMASK(3, 0));
+ mlxsw_sp_acl_rulei_keymask_u32(rulei,
+ MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
+ key->vrid >> 8, GENMASK(3, 0));
mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
&key->source.addr6.s6_addr[0x0],
&key->source_mask.addr6.s6_addr[0x0], 4);
@@ -189,11 +198,6 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule,
rulei = mlxsw_sp_acl_rule_rulei(rule);
rulei->priority = priority;
- mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
- key->vrid, GENMASK(7, 0));
- mlxsw_sp_acl_rulei_keymask_u32(rulei,
- MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
- key->vrid >> 8, GENMASK(3, 0));
switch (key->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index e2aced7ab454..95f63fcf4ba1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -496,7 +496,7 @@ mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks)
* is 2^ACL_MAX_BF_LOG
*/
bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG);
- bf = kzalloc(struct_size(bf, refcnt, bf_bank_size * num_erp_banks),
+ bf = kzalloc(struct_size(bf, refcnt, size_mul(bf_bank_size, num_erp_banks)),
GFP_KERNEL);
if (!bf)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index cb746a43b24b..eaad78605602 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -171,20 +171,22 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = {
MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8),
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8),
- MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5[] = {
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER, 0x04, 20, 11, 0, true),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_3, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_1[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_4_7, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_64_95, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2[] = {
+ MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
@@ -220,7 +222,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = {
MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
- MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4),
+ MLXSW_AFK_BLOCK(0x3D, mlxsw_sp_afk_element_info_ipv4_5),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2),
@@ -322,33 +324,33 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = {
MLXSW_AFK_ELEMENT_INST_EXT_U32(SRC_SYS_PORT, 0x04, 0, 9, -1, true), /* RX_ACL_SYSTEM_PORT */
};
-static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = {
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8),
- MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4),
+static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_5b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER, 0x04, 20, 12),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 4),
MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_96_127, 0x04, 4),
};
static const struct mlxsw_afk_block mlxsw_sp4_afk_blocks[] = {
- MLXSW_AFK_BLOCK(0x10, mlxsw_sp_afk_element_info_mac_0),
- MLXSW_AFK_BLOCK(0x11, mlxsw_sp_afk_element_info_mac_1),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x10, mlxsw_sp_afk_element_info_mac_0),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x11, mlxsw_sp_afk_element_info_mac_1),
MLXSW_AFK_BLOCK(0x12, mlxsw_sp_afk_element_info_mac_2),
MLXSW_AFK_BLOCK(0x13, mlxsw_sp_afk_element_info_mac_3),
MLXSW_AFK_BLOCK(0x14, mlxsw_sp_afk_element_info_mac_4),
- MLXSW_AFK_BLOCK(0x1A, mlxsw_sp_afk_element_info_mac_5b),
- MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0),
- MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x1A, mlxsw_sp_afk_element_info_mac_5b),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x38, mlxsw_sp_afk_element_info_ipv4_0),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x39, mlxsw_sp_afk_element_info_ipv4_1),
MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2),
- MLXSW_AFK_BLOCK(0x35, mlxsw_sp_afk_element_info_ipv4_4b),
+ MLXSW_AFK_BLOCK(0x36, mlxsw_sp_afk_element_info_ipv4_5b),
MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0),
MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1),
MLXSW_AFK_BLOCK(0x47, mlxsw_sp_afk_element_info_ipv6_2b),
MLXSW_AFK_BLOCK(0x43, mlxsw_sp_afk_element_info_ipv6_3),
MLXSW_AFK_BLOCK(0x44, mlxsw_sp_afk_element_info_ipv6_4),
MLXSW_AFK_BLOCK(0x45, mlxsw_sp_afk_element_info_ipv6_5),
- MLXSW_AFK_BLOCK(0x90, mlxsw_sp_afk_element_info_l4_0),
+ MLXSW_AFK_BLOCK_HIGH_ENTROPY(0x90, mlxsw_sp_afk_element_info_l4_0),
MLXSW_AFK_BLOCK(0x92, mlxsw_sp_afk_element_info_l4_2),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index ee59c79156e4..50e591420bd9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -24,7 +24,7 @@ struct mlxsw_sp_counter_pool {
spinlock_t counter_pool_lock; /* Protects counter pool allocations */
atomic_t active_entries_count;
unsigned int sub_pools_count;
- struct mlxsw_sp_counter_sub_pool sub_pools[];
+ struct mlxsw_sp_counter_sub_pool sub_pools[] __counted_by(sub_pools_count);
};
static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 472830d07ac1..0f29e9c19411 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -619,7 +619,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
int i;
for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
- snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
+ snprintf(*p, ETH_GSTRING_LEN, "%.28s_%d",
mlxsw_sp_port_hw_tc_stats[i].str, tc);
*p += ETH_GSTRING_LEN;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
index 9df098474743..e954b8cd2ee8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c
@@ -321,6 +321,14 @@ mlxsw_sp_fid_family_num_fids(const struct mlxsw_sp_fid_family *fid_family)
}
static u16
+mlxsw_sp_fid_family_pgt_size(const struct mlxsw_sp_fid_family *fid_family)
+{
+ u16 num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
+
+ return num_fids * fid_family->nr_flood_tables;
+}
+
+static u16
mlxsw_sp_fid_flood_table_mid(const struct mlxsw_sp_fid_family *fid_family,
const struct mlxsw_sp_flood_table *flood_table,
u16 fid_offset)
@@ -1068,8 +1076,6 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = {
#define MLXSW_SP_FID_8021Q_MAX (VLAN_N_VID - 2)
#define MLXSW_SP_FID_RFID_MAX (11 * 1024)
-#define MLXSW_SP_FID_8021Q_PGT_BASE 0
-#define MLXSW_SP_FID_8021D_PGT_BASE (3 * MLXSW_SP_FID_8021Q_MAX)
static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = {
{
@@ -1434,7 +1440,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021q_family = {
.ops = &mlxsw_sp_fid_8021q_ops,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
- .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
.smpe_index_valid = false,
};
@@ -1448,7 +1453,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp1_fid_8021d_family = {
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
- .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
.smpe_index_valid = false,
};
@@ -1490,7 +1494,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021q_family = {
.ops = &mlxsw_sp_fid_8021q_ops,
.flood_rsp = false,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_0,
- .pgt_base = MLXSW_SP_FID_8021Q_PGT_BASE,
.smpe_index_valid = true,
};
@@ -1504,7 +1507,6 @@ static const struct mlxsw_sp_fid_family mlxsw_sp2_fid_8021d_family = {
.rif_type = MLXSW_SP_RIF_TYPE_FID,
.ops = &mlxsw_sp_fid_8021d_ops,
.bridge_type = MLXSW_REG_BRIDGE_TYPE_1,
- .pgt_base = MLXSW_SP_FID_8021D_PGT_BASE,
.smpe_index_valid = true,
};
@@ -1654,14 +1656,10 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
enum mlxsw_sp_flood_type packet_type = flood_table->packet_type;
struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
const int *sfgc_packet_types;
- u16 num_fids, mid_base;
+ u16 mid_base;
int err, i;
mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
- num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
- err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, mid_base, num_fids);
- if (err)
- return err;
sfgc_packet_types = mlxsw_sp_packet_type_sfgc_types[packet_type];
for (i = 0; i < MLXSW_REG_SFGC_TYPE_MAX; i++) {
@@ -1675,57 +1673,56 @@ mlxsw_sp_fid_flood_table_init(struct mlxsw_sp_fid_family *fid_family,
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfgc), sfgc_pl);
if (err)
- goto err_reg_write;
+ return err;
}
return 0;
-
-err_reg_write:
- mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
- return err;
-}
-
-static void
-mlxsw_sp_fid_flood_table_fini(struct mlxsw_sp_fid_family *fid_family,
- const struct mlxsw_sp_flood_table *flood_table)
-{
- struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
- u16 num_fids, mid_base;
-
- mid_base = mlxsw_sp_fid_flood_table_mid(fid_family, flood_table, 0);
- num_fids = mlxsw_sp_fid_family_num_fids(fid_family);
- mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mid_base, num_fids);
}
static int
mlxsw_sp_fid_flood_tables_init(struct mlxsw_sp_fid_family *fid_family)
{
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 pgt_size;
+ int err;
int i;
+ if (!fid_family->nr_flood_tables)
+ return 0;
+
+ pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family);
+ err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &fid_family->pgt_base,
+ pgt_size);
+ if (err)
+ return err;
+
for (i = 0; i < fid_family->nr_flood_tables; i++) {
const struct mlxsw_sp_flood_table *flood_table;
- int err;
flood_table = &fid_family->flood_tables[i];
err = mlxsw_sp_fid_flood_table_init(fid_family, flood_table);
if (err)
- return err;
+ goto err_flood_table_init;
}
return 0;
+
+err_flood_table_init:
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size);
+ return err;
}
static void
mlxsw_sp_fid_flood_tables_fini(struct mlxsw_sp_fid_family *fid_family)
{
- int i;
+ struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp;
+ u16 pgt_size;
- for (i = 0; i < fid_family->nr_flood_tables; i++) {
- const struct mlxsw_sp_flood_table *flood_table;
+ if (!fid_family->nr_flood_tables)
+ return;
- flood_table = &fid_family->flood_tables[i];
- mlxsw_sp_fid_flood_table_fini(fid_family, flood_table);
- }
+ pgt_size = mlxsw_sp_fid_family_pgt_size(fid_family);
+ mlxsw_sp_pgt_mid_free_range(mlxsw_sp, fid_family->pgt_base, pgt_size);
}
static int mlxsw_sp_fid_family_register(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
index 7dd3dba0fa83..4ef81bac17d6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_pgt.c
@@ -54,25 +54,15 @@ void mlxsw_sp_pgt_mid_free(struct mlxsw_sp *mlxsw_sp, u16 mid_base)
mutex_unlock(&mlxsw_sp->pgt->lock);
}
-int
-mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
+int mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 *p_mid_base,
+ u16 count)
{
- unsigned int idr_cursor;
+ unsigned int mid_base;
int i, err;
mutex_lock(&mlxsw_sp->pgt->lock);
- /* This function is supposed to be called several times as part of
- * driver init, in specific order. Verify that the mid_index is the
- * first free index in the idr, to be able to free the indexes in case
- * of error.
- */
- idr_cursor = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
- if (WARN_ON(idr_cursor != mid_base)) {
- err = -EINVAL;
- goto err_idr_cursor;
- }
-
+ mid_base = idr_get_cursor(&mlxsw_sp->pgt->pgt_idr);
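+	/* use the current IDR cursor as the base of the newly allocated range */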
for (i = 0; i < count; i++) {
err = idr_alloc_cyclic(&mlxsw_sp->pgt->pgt_idr, NULL,
mid_base, mid_base + count, GFP_KERNEL);
@@ -81,12 +71,12 @@ mlxsw_sp_pgt_mid_alloc_range(struct mlxsw_sp *mlxsw_sp, u16 mid_base, u16 count)
}
mutex_unlock(&mlxsw_sp->pgt->lock);
+ *p_mid_base = mid_base;
return 0;
err_idr_alloc_cyclic:
for (i--; i >= 0; i--)
idr_remove(&mlxsw_sp->pgt->pgt_idr, mid_base + i);
-err_idr_cursor:
mutex_unlock(&mlxsw_sp->pgt->lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index debd2c466f11..82a95125d9ca 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3107,7 +3107,7 @@ struct mlxsw_sp_nexthop_group_info {
gateway:1, /* routes using the group use a gateway */
is_resilient:1;
struct list_head list; /* member in nh_res_grp_list */
- struct mlxsw_sp_nexthop nexthops[];
+ struct mlxsw_sp_nexthop nexthops[] __counted_by(count);
};
static struct mlxsw_sp_rif *
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index b3472fb94617..af50ff9e5f26 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -31,7 +31,7 @@ struct mlxsw_sp_span {
refcount_t policer_id_base_ref_count;
atomic_t active_entries_count;
int entries_count;
- struct mlxsw_sp_span_entry entries[];
+ struct mlxsw_sp_span_entry entries[] __counted_by(entries_count);
};
struct mlxsw_sp_span_analyzed_port {
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index c11b118dc415..ddd87ef71caf 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1228,7 +1228,7 @@ err_mem_region:
return err;
}
-static int ks8842_remove(struct platform_device *pdev)
+static void ks8842_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1239,7 +1239,6 @@ static int ks8842_remove(struct platform_device *pdev)
iounmap(adapter->hw_addr);
free_netdev(netdev);
release_mem_region(iomem->start, resource_size(iomem));
- return 0;
}
@@ -1248,7 +1247,7 @@ static struct platform_driver ks8842_platform_driver = {
.name = DRV_NAME,
},
.probe = ks8842_probe,
- .remove = ks8842_remove,
+ .remove_new = ks8842_remove,
};
module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 7f49042484bd..2a7f29854267 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -327,11 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev)
return ks8851_probe_common(netdev, dev, msg_enable);
}
-static int ks8851_remove_par(struct platform_device *pdev)
+static void ks8851_remove_par(struct platform_device *pdev)
{
ks8851_remove_common(&pdev->dev);
-
- return 0;
}
static const struct of_device_id ks8851_match_table[] = {
@@ -347,7 +345,7 @@ static struct platform_driver ks8851_driver = {
.pm = &ks8851_pm_ops,
},
.probe = ks8851_probe_par,
- .remove = ks8851_remove_par,
+ .remove_new = ks8851_remove_par,
};
module_platform_driver(ks8851_driver);
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 2db5949b4c7e..6961cfc55fb9 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1047,7 +1047,8 @@ static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
BIT(HWTSTAMP_TX_ON) |
BIT(HWTSTAMP_TX_ONESTEP_SYNC);
ts_info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
- BIT(HWTSTAMP_FILTER_ALL);
+ BIT(HWTSTAMP_FILTER_ALL) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index c81cdeb4d4e7..45e209a7d083 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1466,9 +1466,15 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
static void lan743x_phy_close(struct lan743x_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct phy_device *phydev = netdev->phydev;
phy_stop(netdev->phydev);
phy_disconnect(netdev->phydev);
+
+ /* using phydev here as phy_disconnect NULLs netdev->phydev */
+ if (phy_is_pseudo_fixed_link(phydev))
+ fixed_phy_unregister(phydev);
}
static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
@@ -1864,6 +1870,50 @@ static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx)
return last_head - last_tail - 1;
}
+static void lan743x_rx_cfg_b_tstamp_config(struct lan743x_adapter *adapter,
+ int rx_ts_config)
+{
+ int channel_number;
+ int index;
+ u32 data;
+
+ for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) {
+ channel_number = adapter->rx[index].channel_number;
+ data = lan743x_csr_read(adapter, RX_CFG_B(channel_number));
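+		/* clear the current timestamp mode bits before applying the new one */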
+ data &= RX_CFG_B_TS_MASK_;
+ data |= rx_ts_config;
+ lan743x_csr_write(adapter, RX_CFG_B(channel_number),
+ data);
+ }
+}
+
+int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
+ int rx_filter)
+{
+ u32 data;
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ lan743x_rx_cfg_b_tstamp_config(adapter,
+ RX_CFG_B_TS_DESCR_EN_);
+ data = lan743x_csr_read(adapter, PTP_RX_TS_CFG);
+ data |= PTP_RX_TS_CFG_EVENT_MSGS_;
+ lan743x_csr_write(adapter, PTP_RX_TS_CFG, data);
+ break;
+ case HWTSTAMP_FILTER_NONE:
+ lan743x_rx_cfg_b_tstamp_config(adapter,
+ RX_CFG_B_TS_NONE_);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ lan743x_rx_cfg_b_tstamp_config(adapter,
+ RX_CFG_B_TS_ALL_RX_);
+ break;
+ default:
+ return -ERANGE;
+ }
+ return 0;
+}
+
void lan743x_tx_set_timestamping_mode(struct lan743x_tx *tx,
bool enable_timestamping,
bool enable_onestep_sync)
@@ -2938,7 +2988,6 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
data |= RX_CFG_B_RX_PAD_2_;
data &= ~RX_CFG_B_RX_RING_LEN_MASK_;
data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_);
- data |= RX_CFG_B_TS_ALL_RX_;
if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0))
data |= RX_CFG_B_RDMABL_512_;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 52609fc13ad9..b648461787d2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -522,6 +522,8 @@
(((u32)(rx_latency)) & 0x0000FFFF)
#define PTP_CAP_INFO (0x0A60)
#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) (((reg_val) & 0x00000070) >> 4)
+#define PTP_RX_TS_CFG (0x0A68)
+#define PTP_RX_TS_CFG_EVENT_MSGS_ GENMASK(3, 0)
#define PTP_TX_MOD (0x0AA4)
#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ (0x10000000)
@@ -657,6 +659,9 @@
#define RX_CFG_B(channel) (0xC44 + ((channel) << 6))
#define RX_CFG_B_TS_ALL_RX_ BIT(29)
+#define RX_CFG_B_TS_DESCR_EN_ BIT(28)
+#define RX_CFG_B_TS_NONE_ 0
+#define RX_CFG_B_TS_MASK_ (0xCFFFFFFF)
#define RX_CFG_B_RX_PAD_MASK_ (0x03000000)
#define RX_CFG_B_RX_PAD_0_ (0x00000000)
#define RX_CFG_B_RX_PAD_2_ (0x02000000)
@@ -991,6 +996,9 @@ struct lan743x_rx {
struct sk_buff *skb_head, *skb_tail;
};
+int lan743x_rx_set_tstamp_mode(struct lan743x_adapter *adapter,
+ int rx_filter);
+
/* SGMII Link Speed Duplex status */
enum lan743x_sgmii_lsd {
POWER_DOWN = 0,
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 39e1066ecd5f..2f04bc77a118 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -1493,6 +1493,10 @@ int lan743x_ptp_open(struct lan743x_adapter *adapter)
temp = lan743x_csr_read(adapter, PTP_TX_MOD2);
temp |= PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_;
lan743x_csr_write(adapter, PTP_TX_MOD2, temp);
+
+ /* Default Timestamping */
+ lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE);
+
lan743x_ptp_enable(adapter);
lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_1588_);
lan743x_csr_write(adapter, PTP_INT_EN_SET,
@@ -1653,6 +1657,9 @@ static void lan743x_ptp_disable(struct lan743x_adapter *adapter)
{
struct lan743x_ptp *ptp = &adapter->ptp;
+ /* Disable Timestamping */
+ lan743x_rx_set_tstamp_mode(adapter, HWTSTAMP_FILTER_NONE);
+
mutex_lock(&ptp->command_lock);
if (!lan743x_ptp_is_enabled(adapter)) {
netif_warn(adapter, drv, adapter->netdev,
@@ -1785,6 +1792,8 @@ int lan743x_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
}
+ ret = lan743x_rx_set_tstamp_mode(adapter, config.rx_filter);
+
if (!ret)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 0d6e79af2410..2635ef8958c8 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -671,7 +671,6 @@ static irqreturn_t lan966x_xtr_irq_handler(int irq, void *args)
skb = netdev_alloc_skb(dev, len);
if (unlikely(!skb)) {
netdev_err(dev, "Unable to allocate sk_buff\n");
- err = -ENOMEM;
break;
}
buf_len = len - ETH_FCS_LEN;
@@ -1261,7 +1260,7 @@ cleanup_ports:
return err;
}
-static int lan966x_remove(struct platform_device *pdev)
+static void lan966x_remove(struct platform_device *pdev)
{
struct lan966x *lan966x = platform_get_drvdata(pdev);
@@ -1280,13 +1279,11 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_ptp_deinit(lan966x);
debugfs_remove_recursive(lan966x->debugfs_root);
-
- return 0;
}
static struct platform_driver lan966x_driver = {
.probe = lan966x_probe,
- .remove = lan966x_remove,
+ .remove_new = lan966x_remove,
.driver = {
.name = "lan966x-switch",
.of_match_table = lan966x_match,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 01f3a3a41cdb..37d2584b48a7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1012,8 +1012,7 @@ static void sparx5_get_sset_strings(struct net_device *ndev, u32 sset, u8 *data)
return;
for (idx = 0; idx < sparx5->num_ethtool_stats; idx++)
- strncpy(data + idx * ETH_GSTRING_LEN,
- sparx5->stats_layout[idx], ETH_GSTRING_LEN);
+ ethtool_sprintf(&data, "%s", sparx5->stats_layout[idx]);
}
static void sparx5_get_sset_data(struct net_device *ndev,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index dc9af480bfea..d1f7fc8b1b71 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -911,7 +911,7 @@ cleanup_pnode:
return err;
}
-static int mchp_sparx5_remove(struct platform_device *pdev)
+static void mchp_sparx5_remove(struct platform_device *pdev)
{
struct sparx5 *sparx5 = platform_get_drvdata(pdev);
@@ -931,8 +931,6 @@ static int mchp_sparx5_remove(struct platform_device *pdev)
/* Unregister netdevs */
sparx5_unregister_notifier_blocks(sparx5);
destroy_workqueue(sparx5->mact_queue);
-
- return 0;
}
static const struct of_device_id mchp_sparx5_match[] = {
@@ -943,7 +941,7 @@ MODULE_DEVICE_TABLE(of, mchp_sparx5_match);
static struct platform_driver mchp_sparx5_driver = {
.probe = mchp_sparx5_probe,
- .remove = mchp_sparx5_remove,
+ .remove_new = mchp_sparx5_remove,
.driver = {
.name = "sparx5-switch",
.of_match_table = mchp_sparx5_match,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
index c2c3397c5898..59bfbda29bb3 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -300,7 +300,7 @@ static int vcap_show_admin(struct vcap_control *vctrl,
vcap_show_admin_info(vctrl, admin, out);
list_for_each_entry(elem, &admin->rules, list) {
vrule = vcap_decode_rule(elem);
- if (IS_ERR_OR_NULL(vrule)) {
+ if (IS_ERR(vrule)) {
ret = PTR_ERR(vrule);
break;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 48ea4aeeea5d..fc3d2903a80f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2687,8 +2687,9 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
ndev->vlan_features = ndev->features;
- ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
- NETDEV_XDP_ACT_NDO_XMIT;
+ xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT);
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 3da99b62797d..96dc69e7141f 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -558,7 +558,7 @@ irq_map_fail:
return ret;
}
-static int moxart_remove(struct platform_device *pdev)
+static void moxart_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
@@ -566,8 +566,6 @@ static int moxart_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, ndev->irq, ndev);
moxart_mac_free_memory(ndev);
free_netdev(ndev);
-
- return 0;
}
static const struct of_device_id moxart_mac_match[] = {
@@ -578,7 +576,7 @@ MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
- .remove = moxart_remove,
+ .remove_new = moxart_remove,
.driver = {
.name = "moxart-ethernet",
.of_match_table = moxart_mac_match,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 151b42465348..993212c3a7da 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -392,7 +392,7 @@ out_free_devlink:
return err;
}
-static int mscc_ocelot_remove(struct platform_device *pdev)
+static void mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
@@ -408,13 +408,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
devlink_free(ocelot->devlink);
-
- return 0;
}
static struct platform_driver mscc_ocelot_driver = {
.probe = mscc_ocelot_probe,
- .remove = mscc_ocelot_remove,
+ .remove_new = mscc_ocelot_remove,
.driver = {
.name = "ocelot-switch",
.of_match_table = mscc_ocelot_match,
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index 3f371faeb6d0..2b6e097df28f 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -227,7 +227,7 @@ MODULE_ALIAS("platform:jazzsonic");
#include "sonic.c"
-static int jazz_sonic_device_remove(struct platform_device *pdev)
+static void jazz_sonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -237,13 +237,11 @@ static int jazz_sonic_device_remove(struct platform_device *pdev)
lp->descriptors, lp->descriptors_laddr);
release_mem_region(dev->base_addr, SONIC_MEM_SIZE);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver jazz_sonic_driver = {
.probe = jazz_sonic_probe,
- .remove = jazz_sonic_device_remove,
+ .remove_new = jazz_sonic_device_remove,
.driver = {
.name = jazz_sonic_string,
},
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index b16f7c830f9b..2fc63860dbdb 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -532,7 +532,7 @@ MODULE_ALIAS("platform:macsonic");
#include "sonic.c"
-static int mac_sonic_platform_remove(struct platform_device *pdev)
+static void mac_sonic_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -541,13 +541,11 @@ static int mac_sonic_platform_remove(struct platform_device *pdev)
dma_free_coherent(lp->device, SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
lp->descriptors, lp->descriptors_laddr);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver mac_sonic_platform_driver = {
.probe = mac_sonic_platform_probe,
- .remove = mac_sonic_platform_remove,
+ .remove_new = mac_sonic_platform_remove,
.driver = {
.name = "macsonic",
},
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 52fef34d43f9..8943e7244310 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -249,7 +249,7 @@ MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
#include "sonic.c"
-static int xtsonic_device_remove(struct platform_device *pdev)
+static void xtsonic_device_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local *lp = netdev_priv(dev);
@@ -260,13 +260,11 @@ static int xtsonic_device_remove(struct platform_device *pdev)
lp->descriptors, lp->descriptors_laddr);
release_region (dev->base_addr, SONIC_MEM_SIZE);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver xtsonic_driver = {
.probe = xtsonic_probe,
- .remove = xtsonic_device_remove,
+ .remove_new = xtsonic_device_remove,
.driver = {
.name = xtsonic_string,
},
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index b1f026b81dea..cc54faca2283 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -378,6 +378,34 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
/* Encryption */
switch (x->props.ealgo) {
case SADB_EALG_NONE:
+ /* The xfrm descriptor for CHACHA20_POLY1305 does not set the algorithm id, so it
+ * is left at the default value SADB_EALG_NONE. In the SADB_EALG_NONE branch, the
+ * driver uses the algorithm name to identify CHACHA20_POLY1305.
+ */
+ if (x->aead && !strcmp(x->aead->alg_name, "rfc7539esp(chacha20,poly1305)")) {
+ if (nn->pdev->device != PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
+ if (x->aead->alg_icv_len != 128) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "ICV must be 128bit with CHACHA20_POLY1305");
+ return -EINVAL;
+ }
+
+ /* Aead->alg_key_len includes a 32-bit salt */
+ if (x->aead->alg_key_len - 32 != 256) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported CHACHA20 key length");
+ return -EINVAL;
+ }
+
+ /* The cipher mode is not configured for CHACHA20 */
+ cfg->ctrl_word.hash = NFP_IPSEC_HASH_POLY1305_128;
+ cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_CHACHA20;
+ break;
+ }
+ fallthrough;
case SADB_EALG_NULL:
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
@@ -427,6 +455,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
}
if (x->aead) {
+ int key_offset = 0;
int salt_len = 4;
key_len = DIV_ROUND_UP(x->aead->alg_key_len, BITS_PER_BYTE);
@@ -437,9 +466,19 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x,
return -EINVAL;
}
- for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++)
- cfg->ciph_key[i] = get_unaligned_be32(x->aead->alg_key +
- sizeof(cfg->ciph_key[0]) * i);
+ /* CHACHA20's key order needs to be adjusted to match the hardware design.
+ * Other ciphers' key order: {K0, K1, K2, K3, K4, K5, K6, K7}
+ * CHACHA20's key order: {K4, K5, K6, K7, K0, K1, K2, K3}
+ */
+ if (!strcmp(x->aead->alg_name, "rfc7539esp(chacha20,poly1305)"))
+ key_offset = key_len / sizeof(cfg->ciph_key[0]) >> 1;
+
+ for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]); i++) {
+ int index = (i + key_offset) % (key_len / sizeof(cfg->ciph_key[0]));
+
+ cfg->ciph_key[index] = get_unaligned_be32(x->aead->alg_key +
+ sizeof(cfg->ciph_key[0]) * i);
+ }
/* Load up the salt */
cfg->aesgcm_fields.salt = get_unaligned_be32(x->aead->alg_key + key_len);
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 0cc026b0aefd..17381bfc15d7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -1070,7 +1070,7 @@ static int nfp_nfd3_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
- skb = build_skb(rxbuf->frag, true_bufsz);
+ skb = napi_build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
nfp_nfd3_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 5d9db8c2a5b4..45be6954d5aa 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -256,7 +256,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
nfp_net_xsk_rx_ring_fill_freelist(r_vec->rx_ring);
if (xdp_redir)
- xdp_do_flush_map();
+ xdp_do_flush();
if (tx_ring->wr_ptr_add)
nfp_net_tx_xmit_more_flush(tx_ring);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index 33b6d74adb4b..8d78c6faefa8 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -1189,7 +1189,7 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
- skb = build_skb(rxbuf->frag, true_bufsz);
+ skb = napi_build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
nfp_nfdk_rx_drop(dp, r_vec, rx_ring, rxbuf, NULL);
continue;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
index 48a74accbbd3..77bf4198dbde 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h
@@ -18,7 +18,7 @@ struct nfp_port;
*/
struct nfp_reprs {
unsigned int num_reprs;
- struct net_device __rcu *reprs[];
+ struct net_device __rcu *reprs[] __counted_by(num_reprs);
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 6e044ac04917..00264af13b49 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -241,7 +241,7 @@ struct nfp_eth_table {
u64 link_modes_supp[2];
u64 link_modes_ad[2];
- } ports[];
+ } ports[] __counted_by(count);
};
struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
index ce7492a6a98f..279ea0b56955 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c
@@ -159,7 +159,7 @@ nfp_resource_acquire(struct nfp_cpp *cpp, const char *name)
if (!res)
return ERR_PTR(-ENOMEM);
- strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ);
+ strscpy(res->name, name, sizeof(res->name));
dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET,
NFP_RESOURCE_TBL_BASE,
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index ba27bbc68f85..fa1f78b03cb2 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -683,7 +683,7 @@ static int nixge_poll(struct napi_struct *napi, int budget)
if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
/* If there's more, reschedule, but clear */
nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
- napi_reschedule(napi);
+ napi_schedule(napi);
} else {
/* if not, turn on RX IRQs again ... */
cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
@@ -755,8 +755,7 @@ static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
- if (napi_schedule_prep(&priv->napi))
- __napi_schedule(&priv->napi);
+ napi_schedule(&priv->napi);
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
@@ -1397,7 +1396,7 @@ free_netdev:
return err;
}
-static int nixge_remove(struct platform_device *pdev)
+static void nixge_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct nixge_priv *priv = netdev_priv(ndev);
@@ -1412,13 +1411,11 @@ static int nixge_remove(struct platform_device *pdev)
mdiobus_unregister(priv->mii_bus);
free_netdev(ndev);
-
- return 0;
}
static struct platform_driver nixge_driver = {
.probe = nixge_probe,
- .remove = nixge_remove,
+ .remove_new = nixge_remove,
.driver = {
.name = "nixge",
.of_match_table = nixge_dt_ids,
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 1a4a272f4c5c..dd3e58a1319c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1417,7 +1417,7 @@ err_exit:
return ret;
}
-static int lpc_eth_drv_remove(struct platform_device *pdev)
+static void lpc_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct netdata_local *pldat = netdev_priv(ndev);
@@ -1436,8 +1436,6 @@ static int lpc_eth_drv_remove(struct platform_device *pdev)
clk_disable_unprepare(pldat->clk);
clk_put(pldat->clk);
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -1505,7 +1503,7 @@ MODULE_DEVICE_TABLE(of, lpc_eth_match);
static struct platform_driver lpc_eth_driver = {
.probe = lpc_eth_drv_probe,
- .remove = lpc_eth_drv_remove,
+ .remove_new = lpc_eth_drv_remove,
#ifdef CONFIG_PM
.suspend = lpc_eth_drv_suspend,
.resume = lpc_eth_drv_resume,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index aae4131f146a..1dbc3cb50b1d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
+#include <linux/skbuff.h>
#include "ionic_if.h"
#include "ionic_regs.h"
@@ -217,7 +218,7 @@ struct ionic_desc_info {
};
unsigned int bytes;
unsigned int nbufs;
- struct ionic_buf_info bufs[IONIC_MAX_FRAGS];
+ struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
ionic_desc_cb cb;
void *cb_arg;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 2c3e36b2dd7f..edc14730ce88 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -3831,6 +3831,18 @@ static void ionic_lif_queue_identify(struct ionic_lif *lif)
qtype, qti->max_sg_elems);
dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
qtype, qti->sg_desc_stride);
+
+ if (qti->max_sg_elems >= IONIC_MAX_FRAGS) {
+ qti->max_sg_elems = IONIC_MAX_FRAGS - 1;
+ dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to IONIC_MAX_FRAGS-1 %d\n",
+ qtype, qti->max_sg_elems);
+ }
+
+ if (qti->max_sg_elems > MAX_SKB_FRAGS) {
+ qti->max_sg_elems = MAX_SKB_FRAGS;
+ dev_dbg(ionic->dev, "limiting qtype %d max_sg_elems to MAX_SKB_FRAGS %d\n",
+ qtype, qti->max_sg_elems);
+ }
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 1dc79cecc5cc..835577392178 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -554,8 +554,8 @@ int ionic_identify(struct ionic *ionic)
memset(ident, 0, sizeof(*ident));
ident->drv.os_type = cpu_to_le32(IONIC_OS_TYPE_LINUX);
- strncpy(ident->drv.driver_ver_str, UTS_RELEASE,
- sizeof(ident->drv.driver_ver_str) - 1);
+ strscpy(ident->drv.driver_ver_str, UTS_RELEASE,
+ sizeof(ident->drv.driver_ver_str));
mutex_lock(&ionic->dev_cmd_lock);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 44466e8c5d77..ccc1b1d407e4 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -1243,25 +1243,84 @@ static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
+ bool too_many_frags = false;
+ skb_frag_t *frag;
+ int desc_bufs;
+ int chunk_len;
+ int frag_rem;
+ int tso_rem;
+ int seg_rem;
+ bool encap;
+ int hdrlen;
int ndescs;
int err;
/* Each desc is mss long max, so a descriptor for each gso_seg */
- if (skb_is_gso(skb))
+ if (skb_is_gso(skb)) {
ndescs = skb_shinfo(skb)->gso_segs;
- else
+ } else {
ndescs = 1;
+ if (skb_shinfo(skb)->nr_frags > q->max_sg_elems) {
+ too_many_frags = true;
+ goto linearize;
+ }
+ }
- /* If non-TSO, just need 1 desc and nr_frags sg elems */
- if (skb_shinfo(skb)->nr_frags <= q->max_sg_elems)
+ /* If non-TSO, or no frags to check, we're done */
+ if (!skb_is_gso(skb) || !skb_shinfo(skb)->nr_frags)
return ndescs;
- /* Too many frags, so linearize */
- err = skb_linearize(skb);
- if (err)
- return err;
+ /* We need to scan the skb to be sure that none of the MTU sized
+ * packets in the TSO will require more sgs per descriptor than we
+ * can support. We loop through the frags, add up the lengths for
+ * a packet, and count the number of sgs used per packet.
+ */
+ tso_rem = skb->len;
+ frag = skb_shinfo(skb)->frags;
+ encap = skb->encapsulation;
+
+ /* start with just hdr in first part of first descriptor */
+ if (encap)
+ hdrlen = skb_inner_tcp_all_headers(skb);
+ else
+ hdrlen = skb_tcp_all_headers(skb);
+ seg_rem = min_t(int, tso_rem, hdrlen + skb_shinfo(skb)->gso_size);
+ frag_rem = hdrlen;
+
+ while (tso_rem > 0) {
+ desc_bufs = 0;
+ while (seg_rem > 0) {
+ desc_bufs++;
+
+ /* We add the +1 because we can take buffers for one
+ * more than we have SGs: one for the initial desc data
+ * in addition to the SG segments that might follow.
+ */
+ if (desc_bufs > q->max_sg_elems + 1) {
+ too_many_frags = true;
+ goto linearize;
+ }
+
+ if (frag_rem == 0) {
+ frag_rem = skb_frag_size(frag);
+ frag++;
+ }
+ chunk_len = min(frag_rem, seg_rem);
+ frag_rem -= chunk_len;
+ tso_rem -= chunk_len;
+ seg_rem -= chunk_len;
+ }
+
+ seg_rem = min_t(int, tso_rem, skb_shinfo(skb)->gso_size);
+ }
- stats->linearize++;
+linearize:
+ if (too_many_frags) {
+ err = skb_linearize(skb);
+ if (err)
+ return err;
+ stats->linearize++;
+ }
return ndescs;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index cdcead614e9f..f67be4b8ad43 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -3204,8 +3204,8 @@ static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
: 128;
- strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
- strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
+ memcpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
+ memcpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
/* Dump memory header */
offset += qed_grc_dump_mem_hdr(p_hwfn,
@@ -6359,8 +6359,7 @@ static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
{
const char *source_str = &((const char *)buf)[*offset];
- strncpy(dest, source_str, size);
- dest[size - 1] = '\0';
+ strscpy(dest, source_str, size);
*offset += size;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index be5cc8b79bd5..dad8e617c393 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -66,12 +66,12 @@ qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
return err;
}
- err = devlink_fmsg_binary_pair_put(fmsg, "dump_data",
- p_dbg_data_buf, dbg_data_buf_size);
+ devlink_fmsg_binary_pair_put(fmsg, "dump_data", p_dbg_data_buf,
+ dbg_data_buf_size);
vfree(p_dbg_data_buf);
- return err;
+ return 0;
}
static int
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index 95820cf1cd6c..b6b849a079ed 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -201,21 +201,6 @@ static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
/* Forced speed capabilities maps */
-struct qede_forced_speed_map {
- u32 speed;
- __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
-
- const u32 *cap_arr;
- u32 arr_size;
-};
-
-#define QEDE_FORCED_SPEED_MAP(value) \
-{ \
- .speed = SPEED_##value, \
- .cap_arr = qede_forced_speed_##value, \
- .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \
-}
-
static const u32 qede_forced_speed_1000[] __initconst = {
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
@@ -263,28 +248,21 @@ static const u32 qede_forced_speed_100000[] __initconst = {
ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
-static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = {
- QEDE_FORCED_SPEED_MAP(1000),
- QEDE_FORCED_SPEED_MAP(10000),
- QEDE_FORCED_SPEED_MAP(20000),
- QEDE_FORCED_SPEED_MAP(25000),
- QEDE_FORCED_SPEED_MAP(40000),
- QEDE_FORCED_SPEED_MAP(50000),
- QEDE_FORCED_SPEED_MAP(100000),
+static struct ethtool_forced_speed_map
+qede_forced_speed_maps[] __ro_after_init = {
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 1000),
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 10000),
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 20000),
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 25000),
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 40000),
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 50000),
+ ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 100000),
};
void __init qede_forced_speed_maps_init(void)
{
- struct qede_forced_speed_map *map;
- u32 i;
-
- for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
- map = qede_forced_speed_maps + i;
-
- linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
- map->cap_arr = NULL;
- map->arr_size = 0;
- }
+ ethtool_forced_speed_maps_init(qede_forced_speed_maps,
+ ARRAY_SIZE(qede_forced_speed_maps));
}
/* Ethtool callbacks */
@@ -564,8 +542,8 @@ static int qede_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
const struct ethtool_link_settings *base = &cmd->base;
+ const struct ethtool_forced_speed_map *map;
struct qede_dev *edev = netdev_priv(dev);
- const struct qede_forced_speed_map *map;
struct qed_link_output current_link;
struct qed_link_params params;
u32 i;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 19bb16daf4e7..3270df72541b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -718,7 +718,7 @@ err_undo_netdev:
return ret;
}
-static int emac_remove(struct platform_device *pdev)
+static void emac_remove(struct platform_device *pdev)
{
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);
@@ -742,8 +742,6 @@ static int emac_remove(struct platform_device *pdev)
iounmap(adpt->phy.base);
free_netdev(netdev);
-
- return 0;
}
static void emac_shutdown(struct platform_device *pdev)
@@ -762,7 +760,7 @@ static void emac_shutdown(struct platform_device *pdev)
static struct platform_driver emac_platform_driver = {
.probe = emac_probe,
- .remove = emac_remove,
+ .remove_new = emac_remove,
.driver = {
.name = "qcom-emac",
.of_match_table = emac_dt_match,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 361b90007148..a987defb575c 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -4596,7 +4596,11 @@ static void r8169_phylink_handler(struct net_device *ndev)
if (netif_carrier_ok(ndev)) {
rtl_link_chg_patch(tp);
pm_request_resume(d);
+ netif_wake_queue(tp->dev);
} else {
+ /* In a few cases rx is broken after link-down otherwise */
+ if (rtl_is_8125(tp))
+ rtl_reset_work(tp);
pm_runtime_idle(d);
}
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3ceb57408ed0..8ef5b0241e64 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Renesas device configuration
+# Renesas network device configuration
#
config NET_VENDOR_RENESAS
@@ -25,9 +25,6 @@ config SH_ETH
select PHYLIB
help
Renesas SuperH Ethernet device driver.
- This driver supporting CPUs are:
- - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740, R8A774x, R8A777x and R8A779x.
config RAVB
tristate "Renesas Ethernet AVB support"
@@ -39,8 +36,6 @@ config RAVB
select PHYLIB
help
Renesas Ethernet AVB device driver.
- This driver supports the following SoCs:
- - R8A779x.
config RENESAS_ETHER_SWITCH
tristate "Renesas Ethernet Switch support"
@@ -51,7 +46,5 @@ config RENESAS_ETHER_SWITCH
select PHYLINK
help
Renesas Ethernet Switch device driver.
- This driver supports the following SoCs:
- - R8A779Fx.
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 592005893464..e8fd85b5fe8f 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -1,14 +1,12 @@
# SPDX-License-Identifier: GPL-2.0
#
-# Makefile for the Renesas device drivers.
+# Makefile for the Renesas network device drivers
#
obj-$(CONFIG_SH_ETH) += sh_eth.o
ravb-objs := ravb_main.o ravb_ptp.o
-
obj-$(CONFIG_RAVB) += ravb.o
rswitch_drv-objs := rswitch.o rcar_gen4_ptp.o
-
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch_drv.o
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0ef0b88b7145..c70cff80cc99 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2880,7 +2880,7 @@ out_release:
return error;
}
-static int ravb_remove(struct platform_device *pdev)
+static void ravb_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ravb_private *priv = netdev_priv(ndev);
@@ -2907,8 +2907,6 @@ static int ravb_remove(struct platform_device *pdev)
reset_control_assert(priv->rstc);
free_netdev(ndev);
platform_set_drvdata(pdev, NULL);
-
- return 0;
}
static int ravb_wol_setup(struct net_device *ndev)
@@ -3046,7 +3044,7 @@ static const struct dev_pm_ops ravb_dev_pm_ops = {
static struct platform_driver ravb_driver = {
.probe = ravb_probe,
- .remove = ravb_remove,
+ .remove_new = ravb_remove,
.driver = {
.name = "ravb",
.pm = &ravb_dev_pm_ops,
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 0fc0b6bea753..43a7795d6591 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -17,6 +17,7 @@
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -1315,6 +1316,7 @@ static int rswitch_phy_device_init(struct rswitch_device *rdev)
if (!phydev)
goto out;
__set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
+ phydev->mac_managed_pm = true;
phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
rdev->etha->phy_interface);
@@ -1405,7 +1407,8 @@ static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
static int rswitch_ether_port_init_all(struct rswitch_private *priv)
{
- int i, err;
+ unsigned int i;
+ int err;
rswitch_for_each_enabled_port(priv, i) {
err = rswitch_ether_port_init_one(priv->rdev[i]);
@@ -1786,7 +1789,8 @@ static void rswitch_device_free(struct rswitch_private *priv, int index)
static int rswitch_init(struct rswitch_private *priv)
{
- int i, err;
+ unsigned int i;
+ int err;
for (i = 0; i < RSWITCH_NUM_PORTS; i++)
rswitch_etha_init(priv, i);
@@ -1816,7 +1820,7 @@ static int rswitch_init(struct rswitch_private *priv)
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
err = rswitch_device_alloc(priv, i);
if (err < 0) {
- for (i--; i >= 0; i--)
+ for (; i-- > 0; )
rswitch_device_free(priv, i);
goto err_device_alloc;
}
@@ -1959,7 +1963,7 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
static void rswitch_deinit(struct rswitch_private *priv)
{
- int i;
+ unsigned int i;
rswitch_gwca_hw_deinit(priv);
rcar_gen4_ptp_unregister(priv->ptp_priv);
@@ -1981,7 +1985,7 @@ static void rswitch_deinit(struct rswitch_private *priv)
rswitch_clock_disable(priv);
}
-static int renesas_eth_sw_remove(struct platform_device *pdev)
+static void renesas_eth_sw_remove(struct platform_device *pdev)
{
struct rswitch_private *priv = platform_get_drvdata(pdev);
@@ -1991,15 +1995,54 @@ static int renesas_eth_sw_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
+}
+
+static int renesas_eth_sw_suspend(struct device *dev)
+{
+ struct rswitch_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ unsigned int i;
+
+ rswitch_for_each_enabled_port(priv, i) {
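+	/* detach and stop each running port, then power down its SerDes */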
+ ndev = priv->rdev[i]->ndev;
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ rswitch_stop(ndev);
+ }
+ if (priv->rdev[i]->serdes->init_count)
+ phy_exit(priv->rdev[i]->serdes);
+ }
+
+ return 0;
+}
+
+static int renesas_eth_sw_resume(struct device *dev)
+{
+ struct rswitch_private *priv = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ unsigned int i;
+
+ rswitch_for_each_enabled_port(priv, i) {
+ phy_init(priv->rdev[i]->serdes);
+ ndev = priv->rdev[i]->ndev;
+ if (netif_running(ndev)) {
+ rswitch_open(ndev);
+ netif_device_attach(ndev);
+ }
+ }
return 0;
}
+static DEFINE_SIMPLE_DEV_PM_OPS(renesas_eth_sw_pm_ops, renesas_eth_sw_suspend,
+ renesas_eth_sw_resume);
+
static struct platform_driver renesas_eth_sw_driver_platform = {
.probe = renesas_eth_sw_probe,
- .remove = renesas_eth_sw_remove,
+ .remove_new = renesas_eth_sw_remove,
.driver = {
.name = "renesas_eth_sw",
+ .pm = pm_sleep_ptr(&renesas_eth_sw_pm_ops),
.of_match_table = renesas_eth_sw_of_table,
}
};
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 04f49a7a5843..27c9d3872c0e 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -20,7 +20,7 @@
else
#define rswitch_for_each_enabled_port_continue_reverse(priv, i) \
- for (i--; i >= 0; i--) \
+ for (; i-- > 0; ) \
if (priv->rdev[i]->disabled) \
continue; \
else
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 274ea16c0a1f..475e1e8c1d35 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3431,7 +3431,7 @@ out_release:
return ret;
}
-static int sh_eth_drv_remove(struct platform_device *pdev)
+static void sh_eth_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -3441,8 +3441,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
sh_mdio_release(mdp);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -3562,7 +3560,7 @@ MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
static struct platform_driver sh_eth_driver = {
.probe = sh_eth_drv_probe,
- .remove = sh_eth_drv_remove,
+ .remove_new = sh_eth_drv_remove,
.id_table = sh_eth_id_table,
.driver = {
.name = CARDNAME,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index fb59ff94509a..e6e130dbe1de 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -169,13 +169,11 @@ err_out:
* Description: this function calls the main to free the net resources
* and calls the platform's hook and releases the resources (e.g. mem).
*/
-static int sxgbe_platform_remove(struct platform_device *pdev)
+static void sxgbe_platform_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
sxgbe_drv_remove(ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -226,7 +224,7 @@ MODULE_DEVICE_TABLE(of, sxgbe_dt_ids);
static struct platform_driver sxgbe_platform_driver = {
.probe = sxgbe_platform_probe,
- .remove = sxgbe_platform_remove,
+ .remove_new = sxgbe_platform_remove,
.driver = {
.name = SXGBE_RESOURCE_NAME,
.pm = &sxgbe_platform_pm_ops,
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 96065dfc747b..76356dadf233 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -819,7 +819,7 @@ err_out:
return err;
}
-static int sgiseeq_remove(struct platform_device *pdev)
+static void sgiseeq_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sgiseeq_private *sp = netdev_priv(dev);
@@ -828,13 +828,11 @@ static int sgiseeq_remove(struct platform_device *pdev)
dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
sp->srings_dma, DMA_BIDIRECTIONAL);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver sgiseeq_driver = {
.probe = sgiseeq_probe,
- .remove = sgiseeq_remove,
+ .remove_new = sgiseeq_remove,
.driver = {
.name = "sgiseeq",
}
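Many platform drivers in this series move from .remove to .remove_new, whose callback returns void; a non-zero return from the old .remove was ignored (apart from a warning) by the platform core, so the new prototype makes the "cannot fail" semantics explicit. A sketch of the converted shape, with placeholder names:

#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Illustrative only: teardown failures must be handled (or logged)
 * locally, since nothing can be returned to the core any more.
 */
static void foo_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
}

static struct platform_driver foo_platform_driver = {
	.remove_new = foo_remove,
	.driver = {
		.name = "foo",
	},
};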
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index 8d2d7ea2ebef..c9e17a8208a9 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -1260,7 +1260,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index c3e2b4a21d10..10709d828a63 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -1291,10 +1291,11 @@ int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
size_t outlen;
int rc;
- MCDI_POPULATE_DWORD_4(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+ MCDI_POPULATE_DWORD_5(inbuf, MAE_ACTION_SET_ALLOC_IN_FLAGS,
MAE_ACTION_SET_ALLOC_IN_VLAN_PUSH, act->vlan_push,
MAE_ACTION_SET_ALLOC_IN_VLAN_POP, act->vlan_pop,
MAE_ACTION_SET_ALLOC_IN_DECAP, act->decap,
+ MAE_ACTION_SET_ALLOC_IN_DO_NAT, act->do_nat,
MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL,
act->do_ttl_dec);
@@ -1705,8 +1706,10 @@ static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
/* action */
act = &rule->lhs_act;
- MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE,
- MAE_MCDI_ENCAP_TYPE_NONE);
+ rc = efx_mae_encap_type_to_mae_type(act->tun_type);
+ if (rc < 0)
+ return rc;
+ MCDI_SET_DWORD(inbuf, MAE_OUTER_RULE_INSERT_IN_ENCAP_TYPE, rc);
/* We always inhibit CT lookup on TCP_INTERESTING_FLAGS, since the
* SW path needs to process the packet to update the conntrack tables
* on connection establishment (SYN) or termination (FIN, RST).
@@ -1734,9 +1737,60 @@ static int efx_mae_insert_lhs_outer_rule(struct efx_nic *efx,
return 0;
}
+static int efx_mae_populate_match_criteria(MCDI_DECLARE_STRUCT_PTR(match_crit),
+ const struct efx_tc_match *match);
+
+static int efx_mae_insert_lhs_action_rule(struct efx_nic *efx,
+ struct efx_tc_lhs_rule *rule,
+ u32 prio)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_ACTION_RULE_INSERT_IN_LEN(MAE_FIELD_MASK_VALUE_PAIRS_V2_LEN));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_RULE_INSERT_OUT_LEN);
+ struct efx_tc_lhs_action *act = &rule->lhs_act;
+ MCDI_DECLARE_STRUCT_PTR(match_crit);
+ MCDI_DECLARE_STRUCT_PTR(response);
+ size_t outlen;
+ int rc;
+
+ match_crit = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_MATCH_CRITERIA);
+ response = _MCDI_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_RESPONSE);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_ASL_ID,
+ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_AS_ID,
+ MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL);
+ EFX_POPULATE_DWORD_5(*_MCDI_STRUCT_DWORD(response, MAE_ACTION_RULE_RESPONSE_LOOKUP_CONTROL),
+ MAE_ACTION_RULE_RESPONSE_DO_CT, !!act->zone,
+ MAE_ACTION_RULE_RESPONSE_DO_RECIRC,
+ act->rid && !act->zone,
+ MAE_ACTION_RULE_RESPONSE_CT_VNI_MODE,
+ MAE_CT_VNI_MODE_ZERO,
+ MAE_ACTION_RULE_RESPONSE_RECIRC_ID,
+ act->rid ? act->rid->fw_id : 0,
+ MAE_ACTION_RULE_RESPONSE_CT_DOMAIN,
+ act->zone ? act->zone->zone : 0);
+ MCDI_STRUCT_SET_DWORD(response, MAE_ACTION_RULE_RESPONSE_COUNTER_ID,
+ act->count ? act->count->cnt->fw_id :
+ MC_CMD_MAE_COUNTER_ALLOC_OUT_COUNTER_ID_NULL);
+ MCDI_SET_DWORD(inbuf, MAE_ACTION_RULE_INSERT_IN_PRIO, prio);
+ rc = efx_mae_populate_match_criteria(match_crit, &rule->match);
+ if (rc)
+ return rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_ACTION_RULE_INSERT, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ rule->fw_id = MCDI_DWORD(outbuf, MAE_ACTION_RULE_INSERT_OUT_AR_ID);
+ return 0;
+}
+
int efx_mae_insert_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule,
u32 prio)
{
+ if (rule->is_ar)
+ return efx_mae_insert_lhs_action_rule(efx, rule, prio);
return efx_mae_insert_lhs_outer_rule(efx, rule, prio);
}
@@ -1770,6 +1824,8 @@ static int efx_mae_remove_lhs_outer_rule(struct efx_nic *efx,
int efx_mae_remove_lhs_rule(struct efx_nic *efx, struct efx_tc_lhs_rule *rule)
{
+ if (rule->is_ar)
+ return efx_mae_delete_rule(efx, rule->fw_id);
return efx_mae_remove_lhs_outer_rule(efx, rule);
}
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index d23da9627338..76578502226e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -2205,10 +2205,9 @@ int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
goto out_free;
}
- strncpy(desc,
+ strscpy(desc,
MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION),
MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen));
- desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0';
} else {
desc[0] = '\0';
}
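strscpy() bounds the copy and guarantees NUL termination within the destination size, returning the copied length or -E2BIG on truncation, which is why the explicit terminator store after the old strncpy() can be dropped. A small illustrative sketch (hypothetical helper, not the MCDI code above):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/string.h>

static void copy_desc(char *dst, const char *src, size_t size)
{
	ssize_t len = strscpy(dst, src, size);	/* always NUL-terminates */

	if (len == -E2BIG)
		pr_debug("description truncated\n");
}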
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index f54200f03e15..b04fdbb8aece 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -108,11 +108,17 @@
#define PTP_MIN_LENGTH 63
#define PTP_ADDR_IPV4 0xe0000181 /* 224.0.1.129 */
-#define PTP_ADDR_IPV6 {0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
- 0, 0x01, 0x81} /* ff0e::181 */
+
+/* ff0e::181 */
+static const struct in6_addr ptp_addr_ipv6 = { { {
+ 0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } };
+
+/* 01-1B-19-00-00-00 */
+static const u8 ptp_addr_ether[ETH_ALEN] __aligned(2) = {
+ 0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 };
+
#define PTP_EVENT_PORT 319
#define PTP_GENERAL_PORT 320
-#define PTP_ADDR_ETHER {0x01, 0x1b, 0x19, 0, 0, 0} /* 01-1B-19-00-00-00 */
/* Annoyingly the format of the version numbers is different between
* versions 1 and 2 so it isn't possible to simply look for 1 or 2.
@@ -1296,7 +1302,7 @@ static int efx_ptp_insert_ipv4_filter(struct efx_nic *efx,
static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx,
struct list_head *filter_list,
- struct in6_addr *addr, u16 port,
+ const struct in6_addr *addr, u16 port,
unsigned long expiry)
{
struct efx_filter_spec spec;
@@ -1309,11 +1315,10 @@ static int efx_ptp_insert_ipv6_filter(struct efx_nic *efx,
static int efx_ptp_insert_eth_multicast_filter(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- const u8 addr[ETH_ALEN] = PTP_ADDR_ETHER;
struct efx_filter_spec spec;
efx_ptp_init_filter(efx, &spec);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, ptp_addr_ether);
spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
spec.ether_type = htons(ETH_P_1588);
return efx_ptp_insert_filter(efx, &ptp->rxfilters_mcast, &spec, 0);
@@ -1346,15 +1351,13 @@ static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
* PTP over IPv6 and Ethernet
*/
if (efx_ptp_use_mac_tx_timestamps(efx)) {
- struct in6_addr ipv6_addr = {{PTP_ADDR_IPV6}};
-
rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
- &ipv6_addr, PTP_EVENT_PORT, 0);
+ &ptp_addr_ipv6, PTP_EVENT_PORT, 0);
if (rc < 0)
goto fail;
rc = efx_ptp_insert_ipv6_filter(efx, &ptp->rxfilters_mcast,
- &ipv6_addr, PTP_GENERAL_PORT, 0);
+ &ptp_addr_ipv6, PTP_GENERAL_PORT, 0);
if (rc < 0)
goto fail;
@@ -1379,9 +1382,7 @@ static bool efx_ptp_valid_unicast_event_pkt(struct sk_buff *skb)
ip_hdr(skb)->protocol == IPPROTO_UDP &&
udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
- struct in6_addr mcast_addr = {{PTP_ADDR_IPV6}};
-
- return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &mcast_addr) &&
+ return !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &ptp_addr_ipv6) &&
ipv6_hdr(skb)->nexthdr == IPPROTO_UDP &&
udp_hdr(skb)->source == htons(PTP_EVENT_PORT);
}
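The PTP multicast addresses become file-scope static const objects instead of macro initializer lists, so every call site shares one copy; the Ethernet address also gains __aligned(2) because the common MAC-address helpers read their operands 16 bits at a time. A small sketch of that convention (values copied from the constants above; the helper is hypothetical):

#include <linux/etherdevice.h>
#include <net/ipv6.h>

/* 01-1B-19-00-00-00; __aligned(2) keeps ether_addr_equal() legal */
static const u8 example_mcast_mac[ETH_ALEN] __aligned(2) = {
	0x01, 0x1b, 0x19, 0x00, 0x00, 0x00 };

/* ff0e::181 */
static const struct in6_addr example_mcast_ip6 = { { {
	0xff, 0x0e, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0x81 } } };

static bool is_ptp_mcast(const u8 *mac, const struct in6_addr *ip6)
{
	return ether_addr_equal(mac, example_mcast_mac) ||
	       ipv6_addr_equal(ip6, &example_mcast_ip6);
}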
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
index 1776f7f8a7a9..a7346e965bfe 100644
--- a/drivers/net/ethernet/sfc/siena/efx_channels.c
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -1285,7 +1285,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
spent = efx_process_channel(channel, budget);
- xdp_do_flush_map();
+ xdp_do_flush();
if (spent < budget) {
if (efx_channel_has_rx_queue(channel) &&
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 30ebef88248d..82e8891a619a 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -642,6 +642,15 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
return -EEXIST;
}
break;
+ case EFX_TC_EM_PSEUDO_OR:
+ /* old EM corresponds to an OR that has to be unique
+ * (it must not overlap with any other OR, whether
+ * direct-EM or pseudo).
+ */
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "%s encap match conflicts with existing pseudo(OR) entry",
+ em_type ? "Pseudo" : "Direct");
+ return -EEXIST;
default: /* Unrecognised pseudo-type. Just say no */
NL_SET_ERR_MSG_FMT_MOD(extack,
"%s encap match conflicts with existing pseudo(%d) entry",
@@ -872,6 +881,93 @@ static bool efx_tc_rule_is_lhs_rule(struct flow_rule *fr,
return false;
}
+/* A foreign LHS rule has matches on enc_ keys at the TC layer (including an
+ * implied match on enc_ip_proto UDP). Translate these into non-enc_ keys,
+ * so that we can use the same MAE machinery as local LHS rules (and so that
+ * the lhs_rules entries have uniform semantics). It may seem odd to do it
+ * this way round, given that the corresponding fields in the MAE MCDIs are
+ * all ENC_, but (a) we don't have enc_L2 or enc_ip_proto in struct
+ * efx_tc_match_fields and (b) semantically an LHS rule doesn't have inner
+ * fields so it's just matching on *the* header rather than the outer header.
+ * Make sure that the non-enc_ keys were not already being matched on, as that
+ * would imply a rule that needed a triple lookup. (Hardware can do that,
+ * with OR-AR-CT-AR, but it halves packet rate so we avoid it where possible;
+ * see efx_tc_flower_flhs_needs_ar().)
+ */
+static int efx_tc_flower_translate_flhs_match(struct efx_tc_match *match)
+{
+ int rc = 0;
+
+#define COPY_MASK_AND_VALUE(_key, _ekey) ({ \
+ if (match->mask._key) { \
+ rc = -EOPNOTSUPP; \
+ } else { \
+ match->mask._key = match->mask._ekey; \
+ match->mask._ekey = 0; \
+ match->value._key = match->value._ekey; \
+ match->value._ekey = 0; \
+ } \
+ rc; \
+})
+#define COPY_FROM_ENC(_key) COPY_MASK_AND_VALUE(_key, enc_##_key)
+ if (match->mask.ip_proto)
+ return -EOPNOTSUPP;
+ match->mask.ip_proto = ~0;
+ match->value.ip_proto = IPPROTO_UDP;
+ if (COPY_FROM_ENC(src_ip) || COPY_FROM_ENC(dst_ip))
+ return rc;
+#ifdef CONFIG_IPV6
+ if (!ipv6_addr_any(&match->mask.src_ip6))
+ return -EOPNOTSUPP;
+ match->mask.src_ip6 = match->mask.enc_src_ip6;
+ memset(&match->mask.enc_src_ip6, 0, sizeof(struct in6_addr));
+ if (!ipv6_addr_any(&match->mask.dst_ip6))
+ return -EOPNOTSUPP;
+ match->mask.dst_ip6 = match->mask.enc_dst_ip6;
+ memset(&match->mask.enc_dst_ip6, 0, sizeof(struct in6_addr));
+#endif
+ if (COPY_FROM_ENC(ip_tos) || COPY_FROM_ENC(ip_ttl))
+ return rc;
+ /* should really copy enc_ip_frag but we don't have that in
+ * parse_match yet
+ */
+ if (COPY_MASK_AND_VALUE(l4_sport, enc_sport) ||
+ COPY_MASK_AND_VALUE(l4_dport, enc_dport))
+ return rc;
+ return 0;
+#undef COPY_FROM_ENC
+#undef COPY_MASK_AND_VALUE
+}
+
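COPY_MASK_AND_VALUE() above is a GNU C statement expression: the ({ ... }) block evaluates to its final expression (rc), so the macro can both move the mask/value pair and be tested directly in an if condition. A stripped-down illustration of the idiom (not driver code):

/* The value of a ({ ... }) block is its last expression, so this macro
 * performs the move and still yields an error code usable in an if().
 */
#define MOVE_IF_CLEAR(dst, src) ({			\
	int __err = 0;					\
	if (dst)					\
		__err = -1; /* destination already used */ \
	else {						\
		(dst) = (src);				\
		(src) = 0;				\
	}						\
	__err;						\
})

/* usage: if (MOVE_IF_CLEAR(a, b) || MOVE_IF_CLEAR(c, d)) return -EINVAL; */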
+/* If a foreign LHS rule wants to match on keys that are only available after
+ * encap header identification and parsing, then it can't be done in the Outer
+ * Rule lookup, because that lookup determines the encap type used to parse
+ * beyond the outer headers. Thus, such rules must use the OR-AR-CT-AR lookup
+ * sequence, with an EM (struct efx_tc_encap_match) in the OR step.
+ * Return true iff the passed match requires this.
+ */
+static bool efx_tc_flower_flhs_needs_ar(struct efx_tc_match *match)
+{
+ /* matches on inner-header keys can't be done in OR */
+ return match->mask.eth_proto ||
+ match->mask.vlan_tci[0] || match->mask.vlan_tci[1] ||
+ match->mask.vlan_proto[0] || match->mask.vlan_proto[1] ||
+ memchr_inv(match->mask.eth_saddr, 0, ETH_ALEN) ||
+ memchr_inv(match->mask.eth_daddr, 0, ETH_ALEN) ||
+ match->mask.ip_proto ||
+ match->mask.ip_tos || match->mask.ip_ttl ||
+ match->mask.src_ip || match->mask.dst_ip ||
+#ifdef CONFIG_IPV6
+ !ipv6_addr_any(&match->mask.src_ip6) ||
+ !ipv6_addr_any(&match->mask.dst_ip6) ||
+#endif
+ match->mask.ip_frag || match->mask.ip_firstfrag ||
+ match->mask.l4_sport || match->mask.l4_dport ||
+ match->mask.tcp_flags ||
+ /* nor can VNI */
+ match->mask.enc_keyid;
+}
+
static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
struct flow_cls_offload *tc,
struct flow_rule *fr,
@@ -882,9 +978,12 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
struct netlink_ext_ack *extack = tc->common.extack;
struct efx_tc_lhs_action *act = &rule->lhs_act;
const struct flow_action_entry *fa;
+ enum efx_tc_counter_type ctype;
bool pipe = true;
int i;
+ ctype = rule->is_ar ? EFX_TC_COUNTER_TYPE_AR : EFX_TC_COUNTER_TYPE_OR;
+
flow_action_for_each(i, fa, &fr->action) {
struct efx_tc_ct_zone *ct_zone;
struct efx_tc_recirc_id *rid;
@@ -917,7 +1016,7 @@ static int efx_tc_flower_handle_lhs_actions(struct efx_nic *efx,
return -EOPNOTSUPP;
}
cnt = efx_tc_flower_get_counter_index(efx, tc->cookie,
- EFX_TC_COUNTER_TYPE_OR);
+ ctype);
if (IS_ERR(cnt)) {
NL_SET_ERR_MSG_MOD(extack, "Failed to obtain a counter");
return PTR_ERR(cnt);
@@ -1354,6 +1453,222 @@ static int efx_tc_incomplete_mangle(struct efx_tc_mangler_state *mung,
return 0;
}
+static int efx_tc_flower_replace_foreign_lhs_ar(struct efx_nic *efx,
+ struct flow_cls_offload *tc,
+ struct flow_rule *fr,
+ struct efx_tc_match *match,
+ struct net_device *net_dev)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_rule *rule, *old;
+ enum efx_encap_type type;
+ int rc;
+
+ type = efx_tc_indr_netdev_type(net_dev);
+ if (type == EFX_ENCAP_TYPE_NONE) {
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
+ return -EOPNOTSUPP;
+ }
+
+ rc = efx_mae_check_encap_type_supported(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware reports no support for %s encap match",
+ efx_tc_encap_type_name(type));
+ return rc;
+ }
+ /* This is an Action Rule, so it needs a separate Encap Match in the
+ * Outer Rule table. Insert that now.
+ */
+ rc = efx_tc_flower_record_encap_match(efx, match, type,
+ EFX_TC_EM_DIRECT, 0, 0, extack);
+ if (rc)
+ return rc;
+
+ match->mask.recirc_id = 0xff;
+ if (match->mask.ct_state_trk && match->value.ct_state_trk) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
+ rc = -EOPNOTSUPP;
+ goto release_encap_match;
+ }
+ /* LHS rules are always -trk, so we don't need to match on that */
+ match->mask.ct_state_trk = 0;
+ match->value.ct_state_trk = 0;
+ /* We must inhibit match on TCP SYN/FIN/RST, so that SW can see
+ * the packet and update the conntrack table.
+ * Outer Rules will do that with CT_TCP_FLAGS_INHIBIT, but Action
+ * Rules don't have that; instead they support matching on
+ * TCP_SYN_FIN_RST (aka TCP_INTERESTING_FLAGS), so use that.
+ * This is only strictly needed if there will be a DO_CT action,
+ * which we don't know yet, but typically there will be and it's
+ * simpler not to bother checking here.
+ */
+ match->mask.tcp_syn_fin_rst = true;
+
+ rc = efx_mae_match_check_caps(efx, &match->mask, extack);
+ if (rc)
+ goto release_encap_match;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule) {
+ rc = -ENOMEM;
+ goto release_encap_match;
+ }
+ rule->cookie = tc->cookie;
+ rule->is_ar = true;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
+ &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule);
+ if (rc)
+ goto release;
+
+ rule->match = *match;
+ rule->lhs_act.tun_type = type;
+
+ rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
+ goto release;
+ }
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed lhs rule (cookie %lx)\n",
+ tc->cookie);
+ return 0;
+
+release:
+ efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ kfree(rule);
+release_encap_match:
+ if (match->encap)
+ efx_tc_flower_release_encap_match(efx, match->encap);
+ return rc;
+}
+
+static int efx_tc_flower_replace_foreign_lhs(struct efx_nic *efx,
+ struct flow_cls_offload *tc,
+ struct flow_rule *fr,
+ struct efx_tc_match *match,
+ struct net_device *net_dev)
+{
+ struct netlink_ext_ack *extack = tc->common.extack;
+ struct efx_tc_lhs_rule *rule, *old;
+ enum efx_encap_type type;
+ int rc;
+
+ if (tc->common.chain_index) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule only allowed in chain 0");
+ return -EOPNOTSUPP;
+ }
+
+ if (!efx_tc_match_is_encap(&match->mask)) {
+ /* This is not a tunnel decap rule, ignore it */
+ netif_dbg(efx, drv, efx->net_dev, "Ignoring foreign LHS filter without encap match\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (efx_tc_flower_flhs_needs_ar(match))
+ return efx_tc_flower_replace_foreign_lhs_ar(efx, tc, fr, match,
+ net_dev);
+
+ type = efx_tc_indr_netdev_type(net_dev);
+ if (type == EFX_ENCAP_TYPE_NONE) {
+ NL_SET_ERR_MSG_MOD(extack, "Egress encap match on unsupported tunnel device");
+ return -EOPNOTSUPP;
+ }
+
+ rc = efx_mae_check_encap_type_supported(efx, type);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Firmware reports no support for %s encap match",
+ efx_tc_encap_type_name(type));
+ return rc;
+ }
+ /* Reserve the outer tuple with a pseudo Encap Match */
+ rc = efx_tc_flower_record_encap_match(efx, match, type,
+ EFX_TC_EM_PSEUDO_OR, 0, 0,
+ extack);
+ if (rc)
+ return rc;
+
+ if (match->mask.ct_state_trk && match->value.ct_state_trk) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule can never match +trk");
+ rc = -EOPNOTSUPP;
+ goto release_encap_match;
+ }
+ /* LHS rules are always -trk, so we don't need to match on that */
+ match->mask.ct_state_trk = 0;
+ match->value.ct_state_trk = 0;
+
+ rc = efx_tc_flower_translate_flhs_match(match);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "LHS rule cannot match on inner fields");
+ goto release_encap_match;
+ }
+
+ rc = efx_mae_match_check_caps_lhs(efx, &match->mask, extack);
+ if (rc)
+ goto release_encap_match;
+
+ rule = kzalloc(sizeof(*rule), GFP_USER);
+ if (!rule) {
+ rc = -ENOMEM;
+ goto release_encap_match;
+ }
+ rule->cookie = tc->cookie;
+ old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
+ &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ if (old) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "Already offloaded rule (cookie %lx)\n", tc->cookie);
+ rc = -EEXIST;
+ NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
+ goto release;
+ }
+
+ /* Parse actions */
+ rc = efx_tc_flower_handle_lhs_actions(efx, tc, fr, net_dev, rule);
+ if (rc)
+ goto release;
+
+ rule->match = *match;
+ rule->lhs_act.tun_type = type;
+
+ rc = efx_mae_insert_lhs_rule(efx, rule, EFX_TC_PRIO_TC);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert rule in hw");
+ goto release;
+ }
+ netif_dbg(efx, drv, efx->net_dev,
+ "Successfully parsed lhs rule (cookie %lx)\n",
+ tc->cookie);
+ return 0;
+
+release:
+ efx_tc_flower_release_lhs_actions(efx, &rule->lhs_act);
+ if (!old)
+ rhashtable_remove_fast(&efx->tc->lhs_rule_ht, &rule->linkage,
+ efx_tc_lhs_rule_ht_params);
+ kfree(rule);
+release_encap_match:
+ if (match->encap)
+ efx_tc_flower_release_encap_match(efx, match->encap);
+ return rc;
+}
+
static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
struct net_device *net_dev,
struct flow_cls_offload *tc)
@@ -1371,7 +1686,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
/* Parse match */
memset(&match, 0, sizeof(match));
- rc = efx_tc_flower_parse_match(efx, fr, &match, NULL);
+ rc = efx_tc_flower_parse_match(efx, fr, &match, extack);
if (rc)
return rc;
/* The rule as given to us doesn't specify a source netdevice.
@@ -1387,6 +1702,10 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
match.value.ingress_port = rc;
match.mask.ingress_port = ~0;
+ if (efx_tc_rule_is_lhs_rule(fr, &match))
+ return efx_tc_flower_replace_foreign_lhs(efx, tc, fr, &match,
+ net_dev);
+
if (tc->common.chain_index) {
struct efx_tc_recirc_id *rid;
@@ -1416,6 +1735,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
if (match.mask.ct_state_est && !match.value.ct_state_est) {
if (match.value.tcp_syn_fin_rst) {
/* Can't offload this combination */
+ NL_SET_ERR_MSG_MOD(extack, "TCP flags and -est conflict for offload");
rc = -EOPNOTSUPP;
goto release;
}
@@ -1442,7 +1762,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
goto release;
}
- rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
+ rc = efx_mae_match_check_caps(efx, &match.mask, extack);
if (rc)
goto release;
@@ -1470,7 +1790,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
extack);
if (rc)
goto release;
- } else {
+ } else if (!tc->common.chain_index) {
/* This is not a tunnel decap rule, ignore it */
netif_dbg(efx, drv, efx->net_dev,
"Ignoring foreign filter without encap match\n");
@@ -1530,6 +1850,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
goto release;
}
if (!efx_tc_flower_action_order_ok(act, EFX_TC_AO_COUNT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Count action violates action order (can't happen)");
rc = -EOPNOTSUPP;
goto release;
}
@@ -2136,6 +2457,14 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
NL_SET_ERR_MSG_MOD(extack, "Cannot offload tunnel decap action without tunnel device");
rc = -EOPNOTSUPP;
goto release;
+ case FLOW_ACTION_CT:
+ if (fa->ct.action != TCA_CT_ACT_NAT) {
+ rc = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Can only offload CT 'nat' action in RHS rules, not %d", fa->ct.action);
+ goto release;
+ }
+ act->do_nat = 1;
+ break;
default:
NL_SET_ERR_MSG_FMT_MOD(extack, "Unhandled action %u",
fa->id);
diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
index 4dd2c378fd9f..7b5190078bee 100644
--- a/drivers/net/ethernet/sfc/tc.h
+++ b/drivers/net/ethernet/sfc/tc.h
@@ -48,6 +48,7 @@ struct efx_tc_encap_action; /* see tc_encap_actions.h */
* @vlan_push: the number of vlan headers to push
* @vlan_pop: the number of vlan headers to pop
* @decap: used to indicate a tunnel header decapsulation should take place
+ * @do_nat: perform NAT/NPT with values returned by conntrack match
* @do_ttl_dec: used to indicate IP TTL / Hop Limit should be decremented
* @deliver: used to indicate a deliver action should take place
* @vlan_tci: tci fields for vlan push actions
@@ -68,6 +69,7 @@ struct efx_tc_action_set {
u16 vlan_push:2;
u16 vlan_pop:2;
u16 decap:1;
+ u16 do_nat:1;
u16 do_ttl_dec:1;
u16 deliver:1;
__be16 vlan_tci[2];
@@ -140,10 +142,14 @@ static inline bool efx_tc_match_is_encap(const struct efx_tc_match_fields *mask)
* The pseudo encap match may be referenced again by an encap match
* with different values for these fields, but all masks must match the
* first (stored in our child_* fields).
+ * @EFX_TC_EM_PSEUDO_OR: registered by an fLHS rule that fits in the OR
+ * table. The &struct efx_tc_lhs_rule already holds the HW OR entry.
+ * Only one reference to this encap match may exist.
*/
enum efx_tc_em_pseudo_type {
EFX_TC_EM_DIRECT,
EFX_TC_EM_PSEUDO_MASK,
+ EFX_TC_EM_PSEUDO_OR,
};
struct efx_tc_encap_match {
@@ -183,6 +189,7 @@ struct efx_tc_action_set_list {
};
struct efx_tc_lhs_action {
+ enum efx_encap_type tun_type;
struct efx_tc_recirc_id *rid;
struct efx_tc_ct_zone *zone;
struct efx_tc_counter_index *count;
@@ -203,6 +210,7 @@ struct efx_tc_lhs_rule {
struct efx_tc_lhs_action lhs_act;
struct rhash_head linkage;
u32 fw_id;
+ bool is_ar; /* Action Rule (for OR-AR-CT-AR sequence) */
};
enum efx_tc_rule_prios {
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index 44bb57670340..d90206f27161 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -276,10 +276,84 @@ static int efx_tc_ct_parse_match(struct efx_nic *efx, struct flow_rule *fr,
return 0;
}
+/**
+ * struct efx_tc_ct_mangler_state - tracks which fields have been pedited
+ *
+ * @ipv4: IP source or destination addr has been set
+ * @tcpudp: TCP/UDP source or destination port has been set
+ */
+struct efx_tc_ct_mangler_state {
+ u8 ipv4:1;
+ u8 tcpudp:1;
+};
+
+static int efx_tc_ct_mangle(struct efx_nic *efx, struct efx_tc_ct_entry *conn,
+ const struct flow_action_entry *fa,
+ struct efx_tc_ct_mangler_state *mung)
+{
+ /* Is this the first mangle we've processed for this rule? */
+ bool first = !(mung->ipv4 || mung->tcpudp);
+ bool dnat = false;
+
+ switch (fa->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ switch (fa->mangle.offset) {
+ case offsetof(struct iphdr, daddr):
+ dnat = true;
+ fallthrough;
+ case offsetof(struct iphdr, saddr):
+ if (fa->mangle.mask)
+ return -EOPNOTSUPP;
+ conn->nat_ip = htonl(fa->mangle.val);
+ mung->ipv4 = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* Both struct tcphdr and struct udphdr start with
+ * __be16 source;
+ * __be16 dest;
+ * so we can use the same code for both.
+ */
+ switch (fa->mangle.offset) {
+ case offsetof(struct tcphdr, dest):
+ BUILD_BUG_ON(offsetof(struct tcphdr, dest) !=
+ offsetof(struct udphdr, dest));
+ dnat = true;
+ fallthrough;
+ case offsetof(struct tcphdr, source):
+ BUILD_BUG_ON(offsetof(struct tcphdr, source) !=
+ offsetof(struct udphdr, source));
+ if (~fa->mangle.mask != 0xffff)
+ return -EOPNOTSUPP;
+ conn->l4_natport = htons(fa->mangle.val);
+ mung->tcpudp = 1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ /* first mangle tells us whether this is SNAT or DNAT;
+ * subsequent mangles must match that
+ */
+ if (first)
+ conn->dnat = dnat;
+ else if (conn->dnat != dnat)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
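For context on the offsets used above: conntrack NAT reaches the driver as FLOW_ACTION_MANGLE entries, one per edited field, and efx_tc_ct_mangle() infers SNAT vs DNAT purely from the header type plus the byte offset of the edit. A hedged sketch of what such an entry carries for an IPv4 DNAT rewrite (illustrative only; this is not how the TC core builds it):

#include <linux/ip.h>
#include <linux/stddef.h>
#include <net/flow_offload.h>

static void fill_example_dnat(struct flow_action_entry *fa, u32 new_daddr)
{
	fa->id = FLOW_ACTION_MANGLE;
	fa->mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_IP4;
	/* daddr offset is what identifies this as DNAT in the code above */
	fa->mangle.offset = offsetof(struct iphdr, daddr);
	fa->mangle.mask = 0;		/* no bits preserved: full 32-bit rewrite */
	fa->mangle.val = new_daddr;	/* efx_tc_ct_mangle() applies htonl() */
}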
static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
struct flow_cls_offload *tc)
{
struct flow_rule *fr = flow_cls_offload_flow_rule(tc);
+ struct efx_tc_ct_mangler_state mung = {};
struct efx_tc_ct_entry *conn, *old;
struct efx_nic *efx = ct_zone->efx;
const struct flow_action_entry *fa;
@@ -326,6 +400,17 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
goto release;
}
break;
+ case FLOW_ACTION_MANGLE:
+ if (conn->eth_proto != htons(ETH_P_IP)) {
+ netif_dbg(efx, drv, efx->net_dev,
+ "NAT only supported for IPv4\n");
+ rc = -EOPNOTSUPP;
+ goto release;
+ }
+ rc = efx_tc_ct_mangle(efx, conn, fa, &mung);
+ if (rc)
+ goto release;
+ break;
default:
netif_dbg(efx, drv, efx->net_dev,
"Unhandled action %u for conntrack\n", fa->id);
@@ -335,8 +420,10 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
}
/* fill in defaults for unmangled values */
- conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
- conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
+ if (!mung.ipv4)
+ conn->nat_ip = conn->dnat ? conn->dst_ip : conn->src_ip;
+ if (!mung.tcpudp)
+ conn->l4_natport = conn->dnat ? conn->l4_dport : conn->l4_sport;
cnt = efx_tc_flower_allocate_counter(efx, EFX_TC_COUNTER_TYPE_CT);
if (IS_ERR(cnt)) {
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 8fc3f5272fa7..98d0b561a057 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -962,7 +962,7 @@ out_free:
return err;
}
-static int ioc3eth_remove(struct platform_device *pdev)
+static void ioc3eth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ioc3_private *ip = netdev_priv(dev);
@@ -973,8 +973,6 @@ static int ioc3eth_remove(struct platform_device *pdev)
unregister_netdev(dev);
del_timer_sync(&ip->ioc3_timer);
free_netdev(dev);
-
- return 0;
}
@@ -1275,7 +1273,7 @@ static void ioc3_set_multicast_list(struct net_device *dev)
static struct platform_driver ioc3eth_driver = {
.probe = ioc3eth_probe,
- .remove = ioc3eth_remove,
+ .remove_new = ioc3eth_remove,
.driver = {
.name = "ioc3-eth",
}
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 6d850ea2b94c..18b6f93d875e 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -854,19 +854,17 @@ static int meth_probe(struct platform_device *pdev)
return 0;
}
-static int meth_remove(struct platform_device *pdev)
+static void meth_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
free_netdev(dev);
-
- return 0;
}
static struct platform_driver meth_driver = {
.probe = meth_probe,
- .remove = meth_remove,
+ .remove_new = meth_remove,
.driver = {
.name = "meth",
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 032eccf8eb42..758347616535 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2411,7 +2411,7 @@ static int smc_drv_probe(struct platform_device *pdev)
return ret;
}
-static int smc_drv_remove(struct platform_device *pdev)
+static void smc_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc_local *lp = netdev_priv(ndev);
@@ -2436,8 +2436,6 @@ static int smc_drv_remove(struct platform_device *pdev)
release_mem_region(res->start, SMC_IO_EXTENT);
free_netdev(ndev);
-
- return 0;
}
static int smc_drv_suspend(struct device *dev)
@@ -2480,7 +2478,7 @@ static const struct dev_pm_ops smc_drv_pm_ops = {
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
- .remove = smc_drv_remove,
+ .remove_new = smc_drv_remove,
.driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index cb590db625e8..31cb7d0166f0 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2314,7 +2314,7 @@ static int smsc911x_init(struct net_device *dev)
return 0;
}
-static int smsc911x_drv_remove(struct platform_device *pdev)
+static void smsc911x_drv_remove(struct platform_device *pdev)
{
struct net_device *dev;
struct smsc911x_data *pdata;
@@ -2348,8 +2348,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
/* standard register access */
@@ -2668,7 +2666,7 @@ MODULE_DEVICE_TABLE(acpi, smsc911x_acpi_match);
static struct platform_driver smsc911x_driver = {
.probe = smsc911x_drv_probe,
- .remove = smsc911x_drv_remove,
+ .remove_new = smsc911x_drv_remove,
.driver = {
.name = SMSC_CHIPNAME,
.pm = SMSC911X_PM_OPS,
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index f358ea003193..0891e9e49ecb 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -780,7 +780,7 @@ static void netsec_finalize_xdp_rx(struct netsec_priv *priv, u32 xdp_res,
u16 pkts)
{
if (xdp_res & NETSEC_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & NETSEC_XDP_TX)
netsec_xdp_ring_tx_db(priv, pkts);
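xdp_do_flush_map() was an alias for xdp_do_flush(); these drivers just switch to the canonical name. The call still belongs once per NAPI poll, after all frames that may have been redirected, so queued XDP_REDIRECT work is pushed out before the poll completes. A rough sketch of the usual placement (hypothetical poll function):

#include <linux/filter.h>
#include <linux/netdevice.h>

static int example_poll(struct napi_struct *napi, int budget)
{
	bool redirected = false;
	int done = 0;

	/* ... RX loop: set "redirected" when xdp_do_redirect() succeeds ... */

	if (redirected)
		xdp_do_flush();	/* flush queued redirects before NAPI completes */

	if (done < budget)
		napi_complete_done(napi, done);

	return done;
}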
@@ -2150,7 +2150,7 @@ free_ndev:
return ret;
}
-static int netsec_remove(struct platform_device *pdev)
+static void netsec_remove(struct platform_device *pdev)
{
struct netsec_priv *priv = platform_get_drvdata(pdev);
@@ -2162,8 +2162,6 @@ static int netsec_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
free_netdev(priv->ndev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -2211,7 +2209,7 @@ MODULE_DEVICE_TABLE(acpi, netsec_acpi_ids);
static struct platform_driver netsec_driver = {
.probe = netsec_probe,
- .remove = netsec_remove,
+ .remove_new = netsec_remove,
.driver = {
.name = "netsec",
.pm = &netsec_pm_ops,
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 4838d2383a43..eed24e67c5a6 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1719,7 +1719,7 @@ out_del_napi:
return ret;
}
-static int ave_remove(struct platform_device *pdev)
+static void ave_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct ave_private *priv = netdev_priv(ndev);
@@ -1727,8 +1727,6 @@ static int ave_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi_rx);
netif_napi_del(&priv->napi_tx);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -1976,7 +1974,7 @@ MODULE_DEVICE_TABLE(of, of_ave_match);
static struct platform_driver ave_driver = {
.probe = ave_probe,
- .remove = ave_remove,
+ .remove_new = ave_remove,
.driver = {
.name = "ave",
.pm = AVE_PM_OPS,
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 06c6871f8788..a2b9e289aa36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -239,6 +239,17 @@ config DWMAC_INTEL_PLAT
the stmmac device driver. This driver is used for the Intel Keem Bay
SoC.
+config DWMAC_LOONGSON1
+ tristate "Loongson1 GMAC support"
+ default MACH_LOONGSON32
+ depends on OF && (MACH_LOONGSON32 || COMPILE_TEST)
+ help
+ Support for ethernet controller on Loongson1 SoC.
+
+ This selects Loongson1 SoC glue layer support for the stmmac
+ device driver. This driver is used for Loongson1-based boards
+ like Loongson LS1B/LS1C.
+
config DWMAC_TEGRA
tristate "NVIDIA Tegra MGBE support"
depends on ARCH_TEGRA || COMPILE_TEST
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 5b57aee19267..80e598bd4255 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
+obj-$(CONFIG_DWMAC_LOONGSON1) += dwmac-loongson1.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
obj-$(CONFIG_DWMAC_TEGRA) += dwmac-tegra.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1e996c29043d..e3f650e88f82 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -293,7 +293,7 @@ struct stmmac_safety_stats {
#define MIN_DMA_RIWT 0x10
#define DEF_DMA_RIWT 0xa0
/* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER 1000
+#define STMMAC_COAL_TX_TIMER 5000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
#define STMMAC_TX_FRAMES 25
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 58a7f08e8d78..643ee6d8d4dd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -115,7 +115,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(gmac))
return PTR_ERR(gmac);
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -124,13 +124,7 @@ static int anarion_dwmac_probe(struct platform_device *pdev)
anarion_gmac_init(pdev, gmac);
plat_dat->bsp_priv = gmac;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret) {
- stmmac_remove_config_dt(pdev, plat_dat);
- return ret;
- }
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id anarion_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 61ebf36da13d..ec924c6c76c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -435,15 +435,14 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
ret = data->probe(pdev, plat_dat, &stmmac_res);
if (ret < 0) {
dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
-
- goto remove_config;
+ return ret;
}
ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
@@ -458,25 +457,17 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
remove:
data->remove(pdev);
-remove_config:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
static void dwc_eth_dwmac_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- const struct dwc_eth_dwmac_data *data;
-
- data = device_get_match_data(&pdev->dev);
+ const struct dwc_eth_dwmac_data *data = device_get_match_data(&pdev->dev);
stmmac_dvr_remove(&pdev->dev);
data->remove(pdev);
-
- stmmac_remove_config_dt(pdev, priv->plat);
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
index 20fc455b3337..598eff926815 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -27,7 +27,7 @@ static int dwmac_generic_probe(struct platform_device *pdev)
return ret;
if (pdev->dev.of_node) {
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
@@ -46,17 +46,7 @@ static int dwmac_generic_probe(struct platform_device *pdev)
plat_dat->unicast_filter_entries = 1;
}
- ret = stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- if (pdev->dev.of_node)
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}
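The bulk of the stmmac glue-driver churn replaces stmmac_probe_config_dt()/stmmac_remove_config_dt() with the device-managed devm_stmmac_probe_config_dt(), so error paths can simply return without unwind labels, and drivers with nothing else to tear down can also drop their remove callback via devm_stmmac_pltfr_probe() (as dwmac-generic does above). The converted probes all end up with roughly this shape (placeholder names):

#include <linux/platform_device.h>

#include "stmmac.h"
#include "stmmac_platform.h"

static int foo_dwmac_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	/* devm-managed: freed automatically on error or driver detach */
	plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	/* SoC-specific plat_dat setup goes here */

	return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
}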
static const struct of_device_id dwmac_generic_match[] = {
@@ -77,7 +67,6 @@ MODULE_DEVICE_TABLE(of, dwmac_generic_match);
static struct platform_driver dwmac_generic_driver = {
.probe = dwmac_generic_probe,
- .remove_new = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index df34e34cc14f..8f730ada71f9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -331,15 +331,14 @@ static int imx_dwmac_probe(struct platform_device *pdev)
if (!dwmac)
return -ENOMEM;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "failed to get match data\n");
- ret = -EINVAL;
- goto err_match_data;
+ return -EINVAL;
}
dwmac->ops = data;
@@ -348,7 +347,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
ret = imx_dwmac_parse_dt(dwmac, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to parse OF data\n");
- goto err_parse_dt;
+ return ret;
}
if (data->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
@@ -365,7 +364,7 @@ static int imx_dwmac_probe(struct platform_device *pdev)
ret = imx_dwmac_clks_config(dwmac, true);
if (ret)
- goto err_clks_config;
+ return ret;
ret = imx_dwmac_init(pdev, dwmac);
if (ret)
@@ -385,10 +384,6 @@ err_drv_probe:
imx_dwmac_exit(pdev, plat_dat->bsp_priv);
err_dwmac_init:
imx_dwmac_clks_config(dwmac, false);
-err_clks_config:
-err_parse_dt:
-err_match_data:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
index 0a20c3d24722..19c93b998fb3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -241,29 +241,25 @@ static int ingenic_mac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL);
- if (!mac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!mac)
+ return -ENOMEM;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "No of match data provided\n");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
/* Get MAC PHY control register */
mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg");
if (IS_ERR(mac->regmap)) {
dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__);
- ret = PTR_ERR(mac->regmap);
- goto err_remove_config_dt;
+ return PTR_ERR(mac->regmap);
}
if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) {
@@ -272,8 +268,7 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->tx_delay = tx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
}
@@ -283,8 +278,7 @@ static int ingenic_mac_probe(struct platform_device *pdev)
mac->rx_delay = rx_delay_ps * 1000;
} else {
dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
}
@@ -295,18 +289,9 @@ static int ingenic_mac_probe(struct platform_device *pdev)
ret = ingenic_mac_init(plat_dat);
if (ret)
- goto err_remove_config_dt;
-
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index d352a14f9d48..d68f0c4e7835 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -7,8 +7,8 @@
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/stmmac.h>
#include "dwmac4.h"
@@ -76,7 +76,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- const struct of_device_id *match;
struct intel_dwmac *dwmac;
unsigned long rate;
int ret;
@@ -85,35 +84,29 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
}
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->dev = &pdev->dev;
dwmac->tx_clk = NULL;
- match = of_match_device(intel_eth_plat_match, &pdev->dev);
- if (match && match->data) {
- dwmac->data = (const struct intel_dwmac_data *)match->data;
-
+ dwmac->data = device_get_match_data(&pdev->dev);
+ if (dwmac->data) {
if (dwmac->data->fix_mac_speed)
plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
/* Enable TX clock */
if (dwmac->data->tx_clk_en) {
dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
- if (IS_ERR(dwmac->tx_clk)) {
- ret = PTR_ERR(dwmac->tx_clk);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->tx_clk))
+ return PTR_ERR(dwmac->tx_clk);
clk_prepare_enable(dwmac->tx_clk);
@@ -126,7 +119,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set tx_clk\n");
- goto err_remove_config_dt;
+ return ret;
}
}
}
@@ -140,7 +133,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to set clk_ptp_ref\n");
- goto err_remove_config_dt;
+ return ret;
}
}
}
@@ -158,15 +151,10 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret) {
clk_disable_unprepare(dwmac->tx_clk);
- goto err_remove_config_dt;
+ return ret;
}
return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
}
static void intel_eth_plat_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a3a249c63598..60283543ffc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -605,7 +605,6 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
plat->int_snapshot_num = AUX_SNAPSHOT1;
- plat->ext_snapshot_num = AUX_SNAPSHOT0;
plat->crosststamp = intel_crosststamp;
plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9b0200749109..281687d7083b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -384,22 +384,20 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
if (val)
return val;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac) {
- err = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!gmac)
+ return -ENOMEM;
gmac->pdev = pdev;
err = ipq806x_gmac_of_parse(gmac);
if (err) {
dev_err(dev, "device tree parsing error\n");
- goto err_remove_config_dt;
+ return err;
}
regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
@@ -459,11 +457,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
err = ipq806x_gmac_configure_qsgmii_params(gmac);
if (err)
- goto err_remove_config_dt;
+ return err;
err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac);
if (err)
- goto err_remove_config_dt;
+ return err;
}
plat_dat->has_gmac = true;
@@ -473,21 +471,12 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
plat_dat->tx_fifo_size = 8192;
plat_dat->rx_fifo_size = 8192;
- err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (err)
- goto err_remove_config_dt;
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
err_unsupported_phy:
dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
phy_modes(gmac->phy_mode));
- err = -EINVAL;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return err;
+ return -EINVAL;
}
static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
new file mode 100644
index 000000000000..3e86810717d3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson1.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Loongson-1 DWMAC glue layer
+ *
+ * Copyright (C) 2011-2023 Keguang Zhang <keguang.zhang@gmail.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define LS1B_GMAC0_BASE (0x1fe10000)
+#define LS1B_GMAC1_BASE (0x1fe20000)
+
+/* Loongson-1 SYSCON Registers */
+#define LS1X_SYSCON0 (0x0)
+#define LS1X_SYSCON1 (0x4)
+
+/* Loongson-1B SYSCON Register Bits */
+#define GMAC1_USE_UART1 BIT(4)
+#define GMAC1_USE_UART0 BIT(3)
+
+#define GMAC1_SHUT BIT(13)
+#define GMAC0_SHUT BIT(12)
+
+#define GMAC1_USE_TXCLK BIT(3)
+#define GMAC0_USE_TXCLK BIT(2)
+#define GMAC1_USE_PWM23 BIT(1)
+#define GMAC0_USE_PWM01 BIT(0)
+
+/* Loongson-1C SYSCON Register Bits */
+#define GMAC_SHUT BIT(6)
+
+#define PHY_INTF_SELI GENMASK(30, 28)
+#define PHY_INTF_MII FIELD_PREP(PHY_INTF_SELI, 0)
+#define PHY_INTF_RMII FIELD_PREP(PHY_INTF_SELI, 4)
+
+struct ls1x_dwmac {
+ struct plat_stmmacenet_data *plat_dat;
+ struct regmap *regmap;
+};
+
+static int ls1b_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+ struct resource *res;
+ unsigned long reg_base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Could not get IO_MEM resources\n");
+ return -EINVAL;
+ }
+ reg_base = (unsigned long)res->start;
+
+ if (reg_base == LS1B_GMAC0_BASE) {
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ regmap_update_bits(regmap, LS1X_SYSCON0,
+ GMAC0_USE_TXCLK | GMAC0_USE_PWM01,
+ 0);
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ regmap_update_bits(regmap, LS1X_SYSCON0,
+ GMAC0_USE_TXCLK | GMAC0_USE_PWM01,
+ GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ plat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
+ } else if (reg_base == LS1B_GMAC1_BASE) {
+ regmap_update_bits(regmap, LS1X_SYSCON0,
+ GMAC1_USE_UART1 | GMAC1_USE_UART0,
+ GMAC1_USE_UART1 | GMAC1_USE_UART0);
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ regmap_update_bits(regmap, LS1X_SYSCON1,
+ GMAC1_USE_TXCLK | GMAC1_USE_PWM23,
+ 0);
+
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ regmap_update_bits(regmap, LS1X_SYSCON1,
+ GMAC1_USE_TXCLK | GMAC1_USE_PWM23,
+ GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode %u\n",
+ plat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ regmap_update_bits(regmap, LS1X_SYSCON1, GMAC1_SHUT, 0);
+ } else {
+ dev_err(&pdev->dev, "Invalid Ethernet MAC base address %lx",
+ reg_base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ls1c_dwmac_syscon_init(struct platform_device *pdev, void *priv)
+{
+ struct ls1x_dwmac *dwmac = priv;
+ struct plat_stmmacenet_data *plat = dwmac->plat_dat;
+ struct regmap *regmap = dwmac->regmap;
+
+ switch (plat->phy_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
+ PHY_INTF_MII);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ regmap_update_bits(regmap, LS1X_SYSCON1, PHY_INTF_SELI,
+ PHY_INTF_RMII);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY-mode %u\n",
+ plat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ regmap_update_bits(regmap, LS1X_SYSCON0, GMAC0_SHUT, 0);
+
+ return 0;
+}
+
+static int ls1x_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct regmap *regmap;
+ struct ls1x_dwmac *dwmac;
+ int (*init)(struct platform_device *pdev, void *priv);
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ /* Probe syscon */
+ regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "loongson,ls1-syscon");
+ if (IS_ERR(regmap))
+ return dev_err_probe(&pdev->dev, PTR_ERR(regmap),
+ "Unable to find syscon\n");
+
+ init = of_device_get_match_data(&pdev->dev);
+ if (!init) {
+ dev_err(&pdev->dev, "No of match data provided\n");
+ return -EINVAL;
+ }
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
+ "dt configuration failed\n");
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = init;
+ dwmac->plat_dat = plat_dat;
+ dwmac->regmap = regmap;
+
+ return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
+}
+
+static const struct of_device_id ls1x_dwmac_match[] = {
+ {
+ .compatible = "loongson,ls1b-gmac",
+ .data = &ls1b_dwmac_syscon_init,
+ },
+ {
+ .compatible = "loongson,ls1c-emac",
+ .data = &ls1c_dwmac_syscon_init,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ls1x_dwmac_match);
+
+static struct platform_driver ls1x_dwmac_driver = {
+ .probe = ls1x_dwmac_probe,
+ .driver = {
+ .name = "loongson1-dwmac",
+ .of_match_table = ls1x_dwmac_match,
+ },
+};
+module_platform_driver(ls1x_dwmac_driver);
+
+MODULE_AUTHOR("Keguang Zhang <keguang.zhang@gmail.com>");
+MODULE_DESCRIPTION("Loongson-1 DWMAC glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
index d0aa674ce705..4c810d8f5bea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -37,7 +37,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -46,8 +46,7 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
if (IS_ERR(reg)) {
dev_err(&pdev->dev, "syscon lookup failed\n");
- ret = PTR_ERR(reg);
- goto err_remove_config_dt;
+ return PTR_ERR(reg);
}
if (plat_dat->mac_interface == PHY_INTERFACE_MODE_MII) {
@@ -56,23 +55,13 @@ static int lpc18xx_dwmac_probe(struct platform_device *pdev)
ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
} else {
dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
regmap_update_bits(reg, LPC18XX_CREG_CREG6,
LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id lpc18xx_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
index cd796ec04132..2a9132d6d743 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -656,7 +656,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -665,7 +665,7 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
ret = mediatek_dwmac_clks_config(priv_plat, true);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -675,8 +675,6 @@ static int mediatek_dwmac_probe(struct platform_device *pdev)
err_drv_probe:
mediatek_dwmac_clks_config(priv_plat, false);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 959f88c6da16..a16bfa9089ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -52,35 +52,22 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dwmac->reg)) {
- ret = PTR_ERR(dwmac->reg);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->reg))
+ return PTR_ERR(dwmac->reg);
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id meson6_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 0b159dc0d5f6..b23944aa344e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -400,33 +400,27 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->data = (const struct meson8b_dwmac_data *)
of_device_get_match_data(&pdev->dev);
- if (!dwmac->data) {
- ret = -EINVAL;
- goto err_remove_config_dt;
- }
+ if (!dwmac->data)
+ return -EINVAL;
dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dwmac->regs)) {
- ret = PTR_ERR(dwmac->regs);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->regs))
+ return PTR_ERR(dwmac->regs);
dwmac->dev = &pdev->dev;
ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
if (ret) {
dev_err(&pdev->dev, "missing phy-mode property\n");
- goto err_remove_config_dt;
+ return ret;
}
/* use 2ns as fallback since this value was previously hardcoded */
@@ -448,53 +442,40 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
dev_err(dwmac->dev,
"The RGMII RX delay range is 0..3000ps in 200ps steps");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
} else {
if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
dev_err(dwmac->dev,
"The only allowed RGMII RX delays values are: 0ps, 2000ps");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
}
dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
"timing-adjustment");
- if (IS_ERR(dwmac->timing_adj_clk)) {
- ret = PTR_ERR(dwmac->timing_adj_clk);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(dwmac->timing_adj_clk))
+ return PTR_ERR(dwmac->timing_adj_clk);
ret = meson8b_init_rgmii_delays(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = dwmac->data->set_phy_mode(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = meson8b_init_prg_eth(dwmac);
if (ret)
- goto err_remove_config_dt;
+ return ret;
plat_dat->bsp_priv = dwmac;
- ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (ret)
- goto err_remove_config_dt;
-
- return 0;
-
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
-
- return ret;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct meson8b_dwmac_data meson8b_dwmac_data = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index d920a50dd16c..382e8de1255d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1824,7 +1824,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1836,18 +1836,16 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->fix_mac_speed = rk_fix_speed;
plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
- if (IS_ERR(plat_dat->bsp_priv)) {
- ret = PTR_ERR(plat_dat->bsp_priv);
- goto err_remove_config_dt;
- }
+ if (IS_ERR(plat_dat->bsp_priv))
+ return PTR_ERR(plat_dat->bsp_priv);
ret = rk_gmac_clk_init(plat_dat);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = rk_gmac_powerup(plat_dat->bsp_priv);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1857,8 +1855,6 @@ static int rk_gmac_probe(struct platform_device *pdev)
err_gmac_powerdown:
rk_gmac_powerdown(plat_dat->bsp_priv);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 9bf102bbc6a0..ba2ce776bd4d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -400,21 +400,19 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
if (IS_ERR(dwmac->stmmac_ocp_rst)) {
ret = PTR_ERR(dwmac->stmmac_ocp_rst);
dev_err(dev, "error getting reset control of ocp %d\n", ret);
- goto err_remove_config_dt;
+ return ret;
}
reset_control_deassert(dwmac->stmmac_ocp_rst);
@@ -422,7 +420,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
ret = socfpga_dwmac_parse_data(dwmac, dev);
if (ret) {
dev_err(dev, "Unable to parse OF data\n");
- goto err_remove_config_dt;
+ return ret;
}
dwmac->ops = ops;
@@ -431,7 +429,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ndev = platform_get_drvdata(pdev);
stpriv = netdev_priv(ndev);
@@ -492,8 +490,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
err_dvr_remove:
stmmac_dvr_remove(&pdev->dev);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
index 9289bb87c3e3..5d630affb4d1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-starfive.c
@@ -105,7 +105,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, err,
"failed to get resources\n");
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return dev_err_probe(&pdev->dev, PTR_ERR(plat_dat),
"dt configuration failed\n");
@@ -141,13 +141,7 @@ static int starfive_dwmac_probe(struct platform_device *pdev)
if (err)
return err;
- err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (err) {
- stmmac_remove_config_dt(pdev, plat_dat);
- return err;
- }
-
- return 0;
+ return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}
static const struct of_device_id starfive_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 0d653bbb931b..4445cddc4cbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -273,20 +273,18 @@ static int sti_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
ret = sti_dwmac_parse_data(dwmac, pdev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- goto err_remove_config_dt;
+ return ret;
}
dwmac->fix_retime_src = data->fix_retime_src;
@@ -296,7 +294,7 @@ static int sti_dwmac_probe(struct platform_device *pdev)
ret = clk_prepare_enable(dwmac->clk);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = sti_dwmac_set_mode(dwmac);
if (ret)
@@ -310,8 +308,6 @@ static int sti_dwmac_probe(struct platform_device *pdev)
disable_clk:
clk_disable_unprepare(dwmac->clk);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index a0e276783e65..c92dfc4ecf57 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -98,7 +98,6 @@ struct stm32_dwmac {
struct stm32_ops {
int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
- int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare);
int (*suspend)(struct stm32_dwmac *dwmac);
void (*resume)(struct stm32_dwmac *dwmac);
int (*parse_data)(struct stm32_dwmac *dwmac,
@@ -107,62 +106,55 @@ struct stm32_ops {
bool clk_rx_enable_in_suspend;
};
-static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
{
- struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
int ret;
- if (dwmac->ops->set_mode) {
- ret = dwmac->ops->set_mode(plat_dat);
- if (ret)
- return ret;
- }
-
ret = clk_prepare_enable(dwmac->clk_tx);
if (ret)
- return ret;
+ goto err_clk_tx;
- if (!dwmac->ops->clk_rx_enable_in_suspend ||
- !dwmac->dev->power.is_suspended) {
+ if (!dwmac->ops->clk_rx_enable_in_suspend || !resume) {
ret = clk_prepare_enable(dwmac->clk_rx);
- if (ret) {
- clk_disable_unprepare(dwmac->clk_tx);
- return ret;
- }
+ if (ret)
+ goto err_clk_rx;
}
- if (dwmac->ops->clk_prepare) {
- ret = dwmac->ops->clk_prepare(dwmac, true);
- if (ret) {
- clk_disable_unprepare(dwmac->clk_rx);
- clk_disable_unprepare(dwmac->clk_tx);
- }
+ ret = clk_prepare_enable(dwmac->syscfg_clk);
+ if (ret)
+ goto err_syscfg_clk;
+
+ if (dwmac->enable_eth_ck) {
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret)
+ goto err_clk_eth_ck;
}
return ret;
+
+err_clk_eth_ck:
+ clk_disable_unprepare(dwmac->syscfg_clk);
+err_syscfg_clk:
+ if (!dwmac->ops->clk_rx_enable_in_suspend || !resume)
+ clk_disable_unprepare(dwmac->clk_rx);
+err_clk_rx:
+ clk_disable_unprepare(dwmac->clk_tx);
+err_clk_tx:
+ return ret;
}
-static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
{
- int ret = 0;
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ int ret;
- if (prepare) {
- ret = clk_prepare_enable(dwmac->syscfg_clk);
+ if (dwmac->ops->set_mode) {
+ ret = dwmac->ops->set_mode(plat_dat);
if (ret)
return ret;
- if (dwmac->enable_eth_ck) {
- ret = clk_prepare_enable(dwmac->clk_eth_ck);
- if (ret) {
- clk_disable_unprepare(dwmac->syscfg_clk);
- return ret;
- }
- }
- } else {
- clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->enable_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
}
- return ret;
+
+ return stm32_dwmac_clk_enable(dwmac, resume);
}
static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
@@ -252,13 +244,15 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
dwmac->ops->syscfg_eth_mask, val << 23);
}
-static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
{
clk_disable_unprepare(dwmac->clk_tx);
- clk_disable_unprepare(dwmac->clk_rx);
+ if (!dwmac->ops->clk_rx_enable_in_suspend || !suspend)
+ clk_disable_unprepare(dwmac->clk_rx);
- if (dwmac->ops->clk_prepare)
- dwmac->ops->clk_prepare(dwmac, false);
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
}
static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
@@ -372,21 +366,18 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!dwmac)
+ return -ENOMEM;
data = of_device_get_match_data(&pdev->dev);
if (!data) {
dev_err(&pdev->dev, "no of match data provided\n");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ return -EINVAL;
}
dwmac->ops = data;
@@ -395,14 +386,14 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Unable to parse OF data\n");
- goto err_remove_config_dt;
+ return ret;
}
plat_dat->bsp_priv = dwmac;
- ret = stm32_dwmac_init(plat_dat);
+ ret = stm32_dwmac_init(plat_dat, false);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -411,9 +402,7 @@ static int stm32_dwmac_probe(struct platform_device *pdev)
return 0;
err_clk_disable:
- stm32_dwmac_clk_disable(dwmac);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
+ stm32_dwmac_clk_disable(dwmac, false);
return ret;
}
@@ -426,7 +415,7 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
- stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+ stm32_dwmac_clk_disable(dwmac, false);
if (dwmac->irq_pwr_wakeup >= 0) {
dev_pm_clear_wake_irq(&pdev->dev);
@@ -436,18 +425,7 @@ static void stm32_dwmac_remove(struct platform_device *pdev)
static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
{
- int ret = 0;
-
- ret = clk_prepare_enable(dwmac->clk_ethstp);
- if (ret)
- return ret;
-
- clk_disable_unprepare(dwmac->clk_tx);
- clk_disable_unprepare(dwmac->syscfg_clk);
- if (dwmac->enable_eth_ck)
- clk_disable_unprepare(dwmac->clk_eth_ck);
-
- return ret;
+ return clk_prepare_enable(dwmac->clk_ethstp);
}
static void stm32mp1_resume(struct stm32_dwmac *dwmac)
@@ -455,14 +433,6 @@ static void stm32mp1_resume(struct stm32_dwmac *dwmac)
clk_disable_unprepare(dwmac->clk_ethstp);
}
-static int stm32mcu_suspend(struct stm32_dwmac *dwmac)
-{
- clk_disable_unprepare(dwmac->clk_tx);
- clk_disable_unprepare(dwmac->clk_rx);
-
- return 0;
-}
-
#ifdef CONFIG_PM_SLEEP
static int stm32_dwmac_suspend(struct device *dev)
{
@@ -473,6 +443,10 @@ static int stm32_dwmac_suspend(struct device *dev)
int ret;
ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ stm32_dwmac_clk_disable(dwmac, true);
if (dwmac->ops->suspend)
ret = dwmac->ops->suspend(dwmac);
@@ -490,7 +464,7 @@ static int stm32_dwmac_resume(struct device *dev)
if (dwmac->ops->resume)
dwmac->ops->resume(dwmac);
- ret = stm32_dwmac_init(priv->plat);
+ ret = stm32_dwmac_init(priv->plat, true);
if (ret)
return ret;
@@ -505,13 +479,11 @@ static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
static struct stm32_ops stm32mcu_dwmac_data = {
.set_mode = stm32mcu_set_mode,
- .suspend = stm32mcu_suspend,
.syscfg_eth_mask = SYSCFG_MCU_ETH_MASK
};
static struct stm32_ops stm32mp1_dwmac_data = {
.set_mode = stm32mp1_set_mode,
- .clk_prepare = stm32mp1_clk_prepare,
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 465ff1fd4785..137741b94122 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -1224,7 +1224,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return -EINVAL;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
@@ -1244,7 +1244,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
if (ret)
- goto dwmac_deconfig;
+ return ret;
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
@@ -1295,8 +1295,6 @@ dwmac_exit:
sun8i_dwmac_exit(pdev, gmac);
dwmac_syscon:
sun8i_dwmac_unset_syscon(gmac);
-dwmac_deconfig:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index beceeae579bf..2653a9f0958c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -108,36 +108,31 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
- if (!gmac) {
- ret = -ENOMEM;
- goto err_remove_config_dt;
- }
+ if (!gmac)
+ return -ENOMEM;
ret = of_get_phy_mode(dev->of_node, &gmac->interface);
if (ret && ret != -ENODEV) {
dev_err(dev, "Can't get phy-mode\n");
- goto err_remove_config_dt;
+ return ret;
}
gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
if (IS_ERR(gmac->tx_clk)) {
dev_err(dev, "could not get tx clock\n");
- ret = PTR_ERR(gmac->tx_clk);
- goto err_remove_config_dt;
+ return PTR_ERR(gmac->tx_clk);
}
/* Optional regulator for PHY */
gmac->regulator = devm_regulator_get_optional(dev, "phy");
if (IS_ERR(gmac->regulator)) {
- if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_remove_config_dt;
- }
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
dev_info(dev, "no regulator found\n");
gmac->regulator = NULL;
}
@@ -155,7 +150,7 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- goto err_remove_config_dt;
+ return ret;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -165,8 +160,6 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
err_gmac_exit:
sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
-err_remove_config_dt:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
index e0f3cbd36852..362f85136c3e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
@@ -284,7 +284,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
if (err < 0)
goto disable_clks;
- plat = stmmac_probe_config_dt(pdev, res.mac);
+ plat = devm_stmmac_probe_config_dt(pdev, res.mac);
if (IS_ERR(plat)) {
err = PTR_ERR(plat);
goto disable_clks;
@@ -303,7 +303,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!plat->mdio_bus_data) {
err = -ENOMEM;
- goto remove;
+ goto disable_clks;
}
}
@@ -321,7 +321,7 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
500, 500 * 2000);
if (err < 0) {
dev_err(mgbe->dev, "timeout waiting for TX lane to become enabled\n");
- goto remove;
+ goto disable_clks;
}
plat->serdes_powerup = mgbe_uphy_lane_bringup_serdes_up;
@@ -342,12 +342,10 @@ static int tegra_mgbe_probe(struct platform_device *pdev)
err = stmmac_dvr_probe(&pdev->dev, plat, &res);
if (err < 0)
- goto remove;
+ goto disable_clks;
return 0;
-remove:
- stmmac_remove_config_dt(pdev, plat);
disable_clks:
clk_bulk_disable_unprepare(ARRAY_SIZE(mgbe_clks), mgbe->clks);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index 22d113fb8e09..a5a5cfa989c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -220,15 +220,13 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
if (IS_ERR(plat_dat))
return PTR_ERR(plat_dat);
dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
- if (!dwmac) {
- ret = -ENOMEM;
- goto remove_config;
- }
+ if (!dwmac)
+ return -ENOMEM;
spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
@@ -238,7 +236,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
ret = visconti_eth_clock_probe(pdev, plat_dat);
if (ret)
- goto remove_config;
+ return ret;
visconti_eth_init_hw(pdev, plat_dat);
@@ -252,22 +250,14 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
remove:
visconti_eth_clock_remove(pdev);
-remove_config:
- stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
static void visconti_eth_dwmac_remove(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
-
stmmac_pltfr_remove(pdev);
-
visconti_eth_clock_remove(pdev);
-
- stmmac_remove_config_dt(pdev, priv->plat);
}
static const struct of_device_id visconti_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 6aa5c0556d22..f628411ae4ae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -981,7 +981,7 @@ static int __stmmac_set_coalesce(struct net_device *dev,
else if (queue >= max_cnt)
return -EINVAL;
- if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
+ if (priv->use_riwt) {
rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5801f4d50f95..3e50fd53a617 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2551,9 +2551,13 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
* @priv: driver private structure
* @budget: napi budget limiting this functions packet handling
* @queue: TX queue index
+ * @pending_packets: signal to arm the TX coal timer
* Description: it reclaims the transmit resources after transmission completes.
+ * If some packets still need to be handled due to TX coalescing, set
+ * pending_packets to true to make NAPI arm the TX coal timer.
*/
-static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
+ bool *pending_packets)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
@@ -2714,7 +2718,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- stmmac_tx_timer_arm(priv, queue);
+ *pending_packets = true;
flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
txq_stats->tx_packets += tx_packets;
@@ -3004,13 +3008,25 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 tx_coal_timer = priv->tx_coal_timer[queue];
+ struct stmmac_channel *ch;
+ struct napi_struct *napi;
if (!tx_coal_timer)
return;
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(tx_coal_timer),
- HRTIMER_MODE_REL);
+ ch = &priv->channel[tx_q->queue_index];
+ napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ /* Arm the timer only if napi is not already scheduled.
+ * If napi is scheduled, try to cancel any pending timer; it will be
+ * armed again on the next scheduled napi poll.
+ */
+ if (unlikely(!napi_is_scheduled(napi)))
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(tx_coal_timer),
+ HRTIMER_MODE_REL);
+ else
+ hrtimer_try_to_cancel(&tx_q->txtimer);
}
/**
@@ -4415,6 +4431,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
WARN_ON(tx_q->tx_skbuff[first_entry]);
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+ /* DWMAC IPs can be synthesized to support tx coe only for a few tx
+ * queues. In that case, checksum offloading for those queues that don't
+ * support tx coe needs to fall back to software checksum calculation.
+ */
+ if (csum_insertion &&
+ priv->plat->tx_queues_cfg[queue].coe_unsupported) {
+ if (unlikely(skb_checksum_help(skb)))
+ goto dma_map_err;
+ csum_insertion = !csum_insertion;
+ }
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -5558,6 +5584,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
struct stmmac_txq_stats *txq_stats;
+ bool pending_packets = false;
u32 chan = ch->index;
unsigned long flags;
int work_done;
@@ -5567,7 +5594,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
txq_stats->napi_poll++;
u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
- work_done = stmmac_tx_clean(priv, budget, chan);
+ work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5578,6 +5605,10 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&ch->lock, flags);
}
+ /* TX still has packets to handle, check if we need to arm the tx timer */
+ if (pending_packets)
+ stmmac_tx_timer_arm(priv, chan);
+
return work_done;
}
@@ -5586,6 +5617,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
+ bool tx_pending_packets = false;
int rx_done, tx_done, rxtx_done;
struct stmmac_rxq_stats *rxq_stats;
struct stmmac_txq_stats *txq_stats;
@@ -5602,7 +5634,7 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
txq_stats->napi_poll++;
u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
- tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
tx_done = min(tx_done, budget);
rx_done = stmmac_rx_zc(priv, budget, chan);
@@ -5627,6 +5659,10 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&ch->lock, flags);
}
+ /* TX still has packets to handle, check if we need to arm the tx timer */
+ if (tx_pending_packets)
+ stmmac_tx_timer_arm(priv, chan);
+
return min(rxtx_done, budget - 1);
}
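Taken together, the stmmac_main.c hunks above defer TX coalesce timer arming from stmmac_tx_clean() to the NAPI poll routines. A minimal sketch of the resulting flow in the TX poll path (heavily simplified; locking, statistics and the budget clamp from the real code are elided, and only names that appear in the hunks above are used):

	bool pending_packets = false;
	int work_done;

	/* stmmac_tx_clean() no longer arms the timer itself; it only reports
	 * leftover descriptors through pending_packets.
	 */
	work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);

	/* ... napi_complete_done() and interrupt re-enable as before ... */

	/* The poll routine arms the timer afterwards; stmmac_tx_timer_arm()
	 * itself now skips the hrtimer while NAPI is still scheduled.
	 */
	if (pending_packets)
		stmmac_tx_timer_arm(priv, chan);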
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 2f0678f15fb7..1ffde555da47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -276,6 +276,9 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
plat->tx_queues_cfg[queue].use_prio = true;
}
+ plat->tx_queues_cfg[queue].coe_unsupported =
+ of_property_read_bool(q_node, "snps,coe-unsupported");
+
queue++;
}
if (queue != plat->tx_queues_to_use) {
@@ -385,6 +388,22 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
}
/**
+ * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ *
+ * Release resources claimed by stmmac_probe_config_dt().
+ */
+static void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
+}
+
+/**
* stmmac_probe_config_dt - parse device-tree driver parameters
* @pdev: platform_device structure
* @mac: MAC address to use
@@ -392,7 +411,7 @@ static int stmmac_of_get_mac_mode(struct device_node *np)
* this function is to read the driver parameters from device-tree and
* set some private fields that will be used by the main at runtime.
*/
-struct plat_stmmacenet_data *
+static struct plat_stmmacenet_data *
stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
{
struct device_node *np = pdev->dev.of_node;
@@ -662,43 +681,14 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return plat;
}
-
-/**
- * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
- * @pdev: platform_device structure
- * @plat: driver data platform structure
- *
- * Release resources claimed by stmmac_probe_config_dt().
- */
-void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
- clk_disable_unprepare(plat->stmmac_clk);
- clk_disable_unprepare(plat->pclk);
- of_node_put(plat->phy_node);
- of_node_put(plat->mdio_node);
-}
#else
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
-{
- return ERR_PTR(-EINVAL);
-}
-
-struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
{
return ERR_PTR(-EINVAL);
}
-
-void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
-{
-}
#endif /* CONFIG_OF */
-EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
EXPORT_SYMBOL_GPL(devm_stmmac_probe_config_dt);
-EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res)
@@ -807,7 +797,7 @@ static void devm_stmmac_pltfr_remove(void *data)
{
struct platform_device *pdev = data;
- stmmac_pltfr_remove_no_dt(pdev);
+ stmmac_pltfr_remove(pdev);
}
/**
@@ -834,12 +824,12 @@ int devm_stmmac_pltfr_probe(struct platform_device *pdev,
EXPORT_SYMBOL_GPL(devm_stmmac_pltfr_probe);
/**
- * stmmac_pltfr_remove_no_dt
+ * stmmac_pltfr_remove
* @pdev: pointer to the platform device
* Description: This undoes the effects of stmmac_pltfr_probe() by removing the
* driver and calling the platform's exit() callback.
*/
-void stmmac_pltfr_remove_no_dt(struct platform_device *pdev)
+void stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -848,23 +838,6 @@ void stmmac_pltfr_remove_no_dt(struct platform_device *pdev)
stmmac_dvr_remove(&pdev->dev);
stmmac_pltfr_exit(pdev, plat);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_remove_no_dt);
-
-/**
- * stmmac_pltfr_remove
- * @pdev: platform device pointer
- * Description: this function calls the main to free the net resources
- * and calls the platforms hook and release the resources (e.g. mem).
- */
-void stmmac_pltfr_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct plat_stmmacenet_data *plat = priv->plat;
-
- stmmac_pltfr_remove_no_dt(pdev);
- stmmac_remove_config_dt(pdev, plat);
-}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index c5565b2a70ac..bb6fc7e59aed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -12,11 +12,7 @@
#include "stmmac.h"
struct plat_stmmacenet_data *
-stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
-struct plat_stmmacenet_data *
devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
-void stmmac_remove_config_dt(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
@@ -32,7 +28,6 @@ int stmmac_pltfr_probe(struct platform_device *pdev,
int devm_stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res);
-void stmmac_pltfr_remove_no_dt(struct platform_device *pdev);
void stmmac_pltfr_remove(struct platform_device *pdev);
extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
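For reference, after this conversion a typical stmmac glue-driver probe reduces to the shape below (an illustrative sketch with made-up "foo" names, using only the helpers declared above; the devm_ variants release the DT configuration automatically on probe failure or unbind, so the err_remove_config_dt unwind paths deleted throughout this series are no longer needed):

	static int foo_dwmac_probe(struct platform_device *pdev)
	{
		struct plat_stmmacenet_data *plat_dat;
		struct stmmac_resources stmmac_res;
		int ret;

		ret = stmmac_get_platform_resources(pdev, &stmmac_res);
		if (ret)
			return ret;

		/* Managed DT parsing: no manual stmmac_remove_config_dt() */
		plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
		if (IS_ERR(plat_dat))
			return PTR_ERR(plat_dat);

		/* platform-specific setup of plat_dat goes here */

		return devm_stmmac_pltfr_probe(pdev, plat_dat, &stmmac_res);
	}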
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 3d7825cb30bb..bffa5c017032 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -81,7 +81,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
write_unlock_irqrestore(&priv->ptp_lock, flags);
- /* Caculate new basetime and re-configured EST after PTP time adjust. */
+ /* Calculate new basetime and re-configure EST after PTP time adjust. */
if (est_rst) {
struct timespec64 current_time, time;
ktime_t current_time_ns, basetime;
@@ -191,26 +191,33 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
priv->systime_flags);
write_unlock_irqrestore(&priv->ptp_lock, flags);
break;
- case PTP_CLK_REQ_EXTTS:
- if (on)
- priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
- else
- priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
+ case PTP_CLK_REQ_EXTTS: {
+ u8 channel;
+
mutex_lock(&priv->aux_ts_lock);
acr_value = readl(ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
acr_value &= ~PTP_ACR_MASK;
+
if (on) {
+ if (FIELD_GET(PTP_ACR_MASK, acr_value)) {
+ netdev_err(priv->dev,
+ "Cannot enable auxiliary snapshot %d as auxiliary snapshot %d is already enabled",
+ rq->extts.index, channel);
+ mutex_unlock(&priv->aux_ts_lock);
+ return -EBUSY;
+ }
+
+ priv->plat->flags |= STMMAC_FLAG_EXT_SNAPSHOT_EN;
+
/* Enable External snapshot trigger */
- acr_value |= priv->plat->ext_snapshot_num;
+ acr_value |= PTP_ACR_ATSEN(rq->extts.index);
acr_value |= PTP_ACR_ATSFC;
- netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
- priv->plat->ext_snapshot_num >>
- PTP_ACR_ATSEN_SHIFT);
} else {
- netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
- priv->plat->ext_snapshot_num >>
- PTP_ACR_ATSEN_SHIFT);
+ priv->plat->flags &= ~STMMAC_FLAG_EXT_SNAPSHOT_EN;
}
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %d %s.\n",
+ rq->extts.index, on ? "enabled" : "disabled");
writel(acr_value, ptpaddr + PTP_ACR);
mutex_unlock(&priv->aux_ts_lock);
/* wait for auxts fifo clear to finish */
@@ -218,6 +225,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
!(acr_value & PTP_ACR_ATSFC),
10, 10000);
break;
+ }
default:
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index d1fe4b46f162..fce3fba2ffd2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -79,7 +79,7 @@
#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */
#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */
#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */
-#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */
+#define PTP_ACR_ATSEN(index) (PTP_ACR_ATSEN0 << (index))
#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */
#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */
#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */
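The index-based macro replaces the open-coded shift. Assuming PTP_ACR_ATSEN0 is BIT(4), consistent with the ATSEN1..ATSEN3 defines and the GENMASK(7, 4) mask above, it expands as in this worked example:

	PTP_ACR_ATSEN(0) == BIT(4)              /* PTP_ACR_ATSEN0 */
	PTP_ACR_ATSEN(3) == BIT(4) << 3 == BIT(7)  /* PTP_ACR_ATSEN3 */

so stmmac_enable() can select the auxiliary snapshot enable bit directly from rq->extts.index while staying inside PTP_ACR_MASK.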
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index f9e43fc32ee8..3ca1c2a816ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -802,7 +802,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
stmmac_start_rx(priv, priv->ioaddr, i);
local_bh_disable();
- napi_reschedule(&ch->rx_napi);
+ napi_schedule(&ch->rx_napi);
local_bh_enable();
}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 011d74087f86..21431f43e4c2 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -10132,7 +10132,7 @@ err_out:
return err;
}
-static int niu_of_remove(struct platform_device *op)
+static void niu_of_remove(struct platform_device *op)
{
struct net_device *dev = platform_get_drvdata(op);
@@ -10165,7 +10165,6 @@ static int niu_of_remove(struct platform_device *op)
free_netdev(dev);
}
- return 0;
}
static const struct of_device_id niu_match[] = {
@@ -10183,7 +10182,7 @@ static struct platform_driver niu_of_driver = {
.of_match_table = niu_match,
},
.probe = niu_of_probe,
- .remove = niu_of_remove,
+ .remove_new = niu_of_remove,
};
#endif /* CONFIG_SPARC64 */
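The niu change above, and the sunbmac, sunqe and spl2sw changes that follow, all apply the same platform-driver conversion: the remove callback returns void instead of int and is registered through .remove_new. A minimal sketch of the pattern (hypothetical "foo" names; the probe callback is unchanged):

	static void foo_remove(struct platform_device *op)
	{
		/* teardown only; there is no return value, since errors from
		 * remove cannot usefully be propagated anyway
		 */
	}

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo",
		},
		.probe = foo_probe,
		.remove_new = foo_remove,
	};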
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index cc34d92d2e3d..16c86b13c185 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1234,7 +1234,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
return bigmac_ether_init(op, qec_op);
}
-static int bigmac_sbus_remove(struct platform_device *op)
+static void bigmac_sbus_remove(struct platform_device *op)
{
struct bigmac *bp = platform_get_drvdata(op);
struct device *parent = op->dev.parent;
@@ -1255,8 +1255,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
bp->bblock_dvma);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id bigmac_sbus_match[] = {
@@ -1274,7 +1272,7 @@ static struct platform_driver bigmac_sbus_driver = {
.of_match_table = bigmac_sbus_match,
},
.probe = bigmac_sbus_probe,
- .remove = bigmac_sbus_remove,
+ .remove_new = bigmac_sbus_remove,
};
module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index b37360f44972..aedd13c94225 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -933,7 +933,7 @@ static int qec_sbus_probe(struct platform_device *op)
return qec_ether_init(op);
}
-static int qec_sbus_remove(struct platform_device *op)
+static void qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = platform_get_drvdata(op);
struct net_device *net_dev = qp->dev;
@@ -948,8 +948,6 @@ static int qec_sbus_remove(struct platform_device *op)
qp->buffers, qp->buffers_dvma);
free_netdev(net_dev);
-
- return 0;
}
static const struct of_device_id qec_sbus_match[] = {
@@ -967,7 +965,7 @@ static struct platform_driver qec_sbus_driver = {
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
- .remove = qec_sbus_remove,
+ .remove_new = qec_sbus_remove,
};
static int __init qec_init(void)
diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c
index c499a14314f1..391a1bc7f446 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_driver.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c
@@ -511,7 +511,7 @@ out_clk_disable:
return ret;
}
-static int spl2sw_remove(struct platform_device *pdev)
+static void spl2sw_remove(struct platform_device *pdev)
{
struct spl2sw_common *comm;
int i;
@@ -538,8 +538,6 @@ static int spl2sw_remove(struct platform_device *pdev)
spl2sw_mdio_remove(comm);
clk_disable_unprepare(comm->clk);
-
- return 0;
}
static const struct of_device_id spl2sw_of_match[] = {
@@ -551,7 +549,7 @@ MODULE_DEVICE_TABLE(of, spl2sw_of_match);
static struct platform_driver spl2sw_driver = {
.probe = spl2sw_probe,
- .remove = spl2sw_remove,
+ .remove_new = spl2sw_remove,
.driver = {
.name = "sp7021_emac",
.of_match_table = spl2sw_of_match,
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index cac61f5d3fd4..e60b557d59b9 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_TI
bool "Texas Instruments (TI) devices"
default y
- depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
+ depends on PCI || EISA || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -180,13 +180,6 @@ config TLAN
Please email feedback to <torben.mathiasen@compaq.com>.
-config CPMAC
- tristate "TI AR7 CPMAC Ethernet support"
- depends on AR7
- select PHYLIB
- help
- TI AR7 CPMAC Ethernet support
-
config TI_ICSSG_PRUETH
tristate "TI Gigabit PRU Ethernet driver"
select PHYLIB
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 67bed861f31d..27de1d697134 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
obj-$(CONFIG_TLAN) += tlan.o
-obj-$(CONFIG_CPMAC) += cpmac.o
obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
deleted file mode 100644
index 80eeeb463c4f..000000000000
--- a/drivers/net/ethernet/ti/cpmac.c
+++ /dev/null
@@ -1,1251 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2006, 2007 Eugene Konev
- *
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/moduleparam.h>
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-
-#include <linux/netdevice.h>
-#include <linux/if_vlan.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/skbuff.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/clk.h>
-#include <linux/gpio.h>
-#include <linux/atomic.h>
-
-#include <asm/mach-ar7/ar7.h>
-
-MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
-MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:cpmac");
-
-static int debug_level = 8;
-static int dumb_switch;
-
-/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
-module_param(debug_level, int, 0444);
-module_param(dumb_switch, int, 0444);
-
-MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
-MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
-
-#define CPMAC_VERSION "0.5.2"
-/* frame size + 802.1q tag + FCS size */
-#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-#define CPMAC_QUEUES 8
-
-/* Ethernet registers */
-#define CPMAC_TX_CONTROL 0x0004
-#define CPMAC_TX_TEARDOWN 0x0008
-#define CPMAC_RX_CONTROL 0x0014
-#define CPMAC_RX_TEARDOWN 0x0018
-#define CPMAC_MBP 0x0100
-#define MBP_RXPASSCRC 0x40000000
-#define MBP_RXQOS 0x20000000
-#define MBP_RXNOCHAIN 0x10000000
-#define MBP_RXCMF 0x01000000
-#define MBP_RXSHORT 0x00800000
-#define MBP_RXCEF 0x00400000
-#define MBP_RXPROMISC 0x00200000
-#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
-#define MBP_RXBCAST 0x00002000
-#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
-#define MBP_RXMCAST 0x00000020
-#define MBP_MCASTCHAN(channel) ((channel) & 0x7)
-#define CPMAC_UNICAST_ENABLE 0x0104
-#define CPMAC_UNICAST_CLEAR 0x0108
-#define CPMAC_MAX_LENGTH 0x010c
-#define CPMAC_BUFFER_OFFSET 0x0110
-#define CPMAC_MAC_CONTROL 0x0160
-#define MAC_TXPTYPE 0x00000200
-#define MAC_TXPACE 0x00000040
-#define MAC_MII 0x00000020
-#define MAC_TXFLOW 0x00000010
-#define MAC_RXFLOW 0x00000008
-#define MAC_MTEST 0x00000004
-#define MAC_LOOPBACK 0x00000002
-#define MAC_FDX 0x00000001
-#define CPMAC_MAC_STATUS 0x0164
-#define MAC_STATUS_QOS 0x00000004
-#define MAC_STATUS_RXFLOW 0x00000002
-#define MAC_STATUS_TXFLOW 0x00000001
-#define CPMAC_TX_INT_ENABLE 0x0178
-#define CPMAC_TX_INT_CLEAR 0x017c
-#define CPMAC_MAC_INT_VECTOR 0x0180
-#define MAC_INT_STATUS 0x00080000
-#define MAC_INT_HOST 0x00040000
-#define MAC_INT_RX 0x00020000
-#define MAC_INT_TX 0x00010000
-#define CPMAC_MAC_EOI_VECTOR 0x0184
-#define CPMAC_RX_INT_ENABLE 0x0198
-#define CPMAC_RX_INT_CLEAR 0x019c
-#define CPMAC_MAC_INT_ENABLE 0x01a8
-#define CPMAC_MAC_INT_CLEAR 0x01ac
-#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
-#define CPMAC_MAC_ADDR_MID 0x01d0
-#define CPMAC_MAC_ADDR_HI 0x01d4
-#define CPMAC_MAC_HASH_LO 0x01d8
-#define CPMAC_MAC_HASH_HI 0x01dc
-#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
-#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
-#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
-#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
-#define CPMAC_REG_END 0x0680
-
-/* Rx/Tx statistics
- * TODO: use some of them to fill stats in cpmac_stats()
- */
-#define CPMAC_STATS_RX_GOOD 0x0200
-#define CPMAC_STATS_RX_BCAST 0x0204
-#define CPMAC_STATS_RX_MCAST 0x0208
-#define CPMAC_STATS_RX_PAUSE 0x020c
-#define CPMAC_STATS_RX_CRC 0x0210
-#define CPMAC_STATS_RX_ALIGN 0x0214
-#define CPMAC_STATS_RX_OVER 0x0218
-#define CPMAC_STATS_RX_JABBER 0x021c
-#define CPMAC_STATS_RX_UNDER 0x0220
-#define CPMAC_STATS_RX_FRAG 0x0224
-#define CPMAC_STATS_RX_FILTER 0x0228
-#define CPMAC_STATS_RX_QOSFILTER 0x022c
-#define CPMAC_STATS_RX_OCTETS 0x0230
-
-#define CPMAC_STATS_TX_GOOD 0x0234
-#define CPMAC_STATS_TX_BCAST 0x0238
-#define CPMAC_STATS_TX_MCAST 0x023c
-#define CPMAC_STATS_TX_PAUSE 0x0240
-#define CPMAC_STATS_TX_DEFER 0x0244
-#define CPMAC_STATS_TX_COLLISION 0x0248
-#define CPMAC_STATS_TX_SINGLECOLL 0x024c
-#define CPMAC_STATS_TX_MULTICOLL 0x0250
-#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
-#define CPMAC_STATS_TX_LATECOLL 0x0258
-#define CPMAC_STATS_TX_UNDERRUN 0x025c
-#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
-#define CPMAC_STATS_TX_OCTETS 0x0264
-
-#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
-#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
- (reg)))
-
-/* MDIO bus */
-#define CPMAC_MDIO_VERSION 0x0000
-#define CPMAC_MDIO_CONTROL 0x0004
-#define MDIOC_IDLE 0x80000000
-#define MDIOC_ENABLE 0x40000000
-#define MDIOC_PREAMBLE 0x00100000
-#define MDIOC_FAULT 0x00080000
-#define MDIOC_FAULTDETECT 0x00040000
-#define MDIOC_INTTEST 0x00020000
-#define MDIOC_CLKDIV(div) ((div) & 0xff)
-#define CPMAC_MDIO_ALIVE 0x0008
-#define CPMAC_MDIO_LINK 0x000c
-#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
-#define MDIO_BUSY 0x80000000
-#define MDIO_WRITE 0x40000000
-#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
-#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
-#define MDIO_DATA(data) ((data) & 0xffff)
-#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
-#define PHYSEL_LINKSEL 0x00000040
-#define PHYSEL_LINKINT 0x00000020
-
-struct cpmac_desc {
- u32 hw_next;
- u32 hw_data;
- u16 buflen;
- u16 bufflags;
- u16 datalen;
- u16 dataflags;
-#define CPMAC_SOP 0x8000
-#define CPMAC_EOP 0x4000
-#define CPMAC_OWN 0x2000
-#define CPMAC_EOQ 0x1000
- struct sk_buff *skb;
- struct cpmac_desc *next;
- struct cpmac_desc *prev;
- dma_addr_t mapping;
- dma_addr_t data_mapping;
-};
-
-struct cpmac_priv {
- spinlock_t lock;
- spinlock_t rx_lock;
- struct cpmac_desc *rx_head;
- int ring_size;
- struct cpmac_desc *desc_ring;
- dma_addr_t dma_ring;
- void __iomem *regs;
- struct mii_bus *mii_bus;
- char phy_name[MII_BUS_ID_SIZE + 3];
- int oldlink, oldspeed, oldduplex;
- u32 msg_enable;
- struct net_device *dev;
- struct work_struct reset_work;
- struct platform_device *pdev;
- struct napi_struct napi;
- atomic_t reset_pending;
-};
-
-static irqreturn_t cpmac_irq(int, void *);
-static void cpmac_hw_start(struct net_device *dev);
-static void cpmac_hw_stop(struct net_device *dev);
-static int cpmac_stop(struct net_device *dev);
-static int cpmac_open(struct net_device *dev);
-
-static void cpmac_dump_regs(struct net_device *dev)
-{
- int i;
- struct cpmac_priv *priv = netdev_priv(dev);
-
- for (i = 0; i < CPMAC_REG_END; i += 4) {
- if (i % 16 == 0) {
- if (i)
- printk("\n");
- printk("%s: reg[%p]:", dev->name, priv->regs + i);
- }
- printk(" %08x", cpmac_read(priv->regs, i));
- }
- printk("\n");
-}
-
-static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
-{
- int i;
-
- printk("%s: desc[%p]:", dev->name, desc);
- for (i = 0; i < sizeof(*desc) / 4; i++)
- printk(" %08x", ((u32 *)desc)[i]);
- printk("\n");
-}
-
-static void cpmac_dump_all_desc(struct net_device *dev)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
- struct cpmac_desc *dump = priv->rx_head;
-
- do {
- cpmac_dump_desc(dev, dump);
- dump = dump->next;
- } while (dump != priv->rx_head);
-}
-
-static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
-{
- int i;
-
- printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
- for (i = 0; i < skb->len; i++) {
- if (i % 16 == 0) {
- if (i)
- printk("\n");
- printk("%s: data[%p]:", dev->name, skb->data + i);
- }
- printk(" %02x", ((u8 *)skb->data)[i]);
- }
- printk("\n");
-}
-
-static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
-{
- u32 val;
-
- while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
- cpu_relax();
- cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
- MDIO_PHY(phy_id));
- while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
- cpu_relax();
-
- return MDIO_DATA(val);
-}
-
-static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
- int reg, u16 val)
-{
- while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
- cpu_relax();
- cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
- MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
-
- return 0;
-}
-
-static int cpmac_mdio_reset(struct mii_bus *bus)
-{
- struct clk *cpmac_clk;
-
- cpmac_clk = clk_get(&bus->dev, "cpmac");
- if (IS_ERR(cpmac_clk)) {
- pr_err("unable to get cpmac clock\n");
- return -1;
- }
- ar7_device_reset(AR7_RESET_BIT_MDIO);
- cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
- MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
-
- return 0;
-}
-
-static struct mii_bus *cpmac_mii;
-
-static void cpmac_set_multicast_list(struct net_device *dev)
-{
- struct netdev_hw_addr *ha;
- u8 tmp;
- u32 mbp, bit, hash[2] = { 0, };
- struct cpmac_priv *priv = netdev_priv(dev);
-
- mbp = cpmac_read(priv->regs, CPMAC_MBP);
- if (dev->flags & IFF_PROMISC) {
- cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
- MBP_RXPROMISC);
- } else {
- cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
- if (dev->flags & IFF_ALLMULTI) {
- /* enable all multicast mode */
- cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
- cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
- } else {
- /* cpmac uses some strange mac address hashing
- * (not crc32)
- */
- netdev_for_each_mc_addr(ha, dev) {
- bit = 0;
- tmp = ha->addr[0];
- bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = ha->addr[1];
- bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = ha->addr[2];
- bit ^= (tmp >> 6) ^ tmp;
- tmp = ha->addr[3];
- bit ^= (tmp >> 2) ^ (tmp << 4);
- tmp = ha->addr[4];
- bit ^= (tmp >> 4) ^ (tmp << 2);
- tmp = ha->addr[5];
- bit ^= (tmp >> 6) ^ tmp;
- bit &= 0x3f;
- hash[bit / 32] |= 1 << (bit % 32);
- }
-
- cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
- cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
- }
- }
-}
-
-static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
- struct cpmac_desc *desc)
-{
- struct sk_buff *skb, *result = NULL;
-
- if (unlikely(netif_msg_hw(priv)))
- cpmac_dump_desc(priv->dev, desc);
- cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
- if (unlikely(!desc->datalen)) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- netdev_warn(priv->dev, "rx: spurious interrupt\n");
-
- return NULL;
- }
-
- skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
- if (likely(skb)) {
- skb_put(desc->skb, desc->datalen);
- desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
- skb_checksum_none_assert(desc->skb);
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += desc->datalen;
- result = desc->skb;
- dma_unmap_single(&priv->dev->dev, desc->data_mapping,
- CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
- desc->skb = skb;
- desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
- CPMAC_SKB_SIZE,
- DMA_FROM_DEVICE);
- desc->hw_data = (u32)desc->data_mapping;
- if (unlikely(netif_msg_pktdata(priv))) {
- netdev_dbg(priv->dev, "received packet:\n");
- cpmac_dump_skb(priv->dev, result);
- }
- } else {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- netdev_warn(priv->dev,
- "low on skbs, dropping packet\n");
-
- priv->dev->stats.rx_dropped++;
- }
-
- desc->buflen = CPMAC_SKB_SIZE;
- desc->dataflags = CPMAC_OWN;
-
- return result;
-}
-
-static int cpmac_poll(struct napi_struct *napi, int budget)
-{
- struct sk_buff *skb;
- struct cpmac_desc *desc, *restart;
- struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
- int received = 0, processed = 0;
-
- spin_lock(&priv->rx_lock);
- if (unlikely(!priv->rx_head)) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- netdev_warn(priv->dev, "rx: polling, but no queue\n");
-
- spin_unlock(&priv->rx_lock);
- napi_complete(napi);
- return 0;
- }
-
- desc = priv->rx_head;
- restart = NULL;
- while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
- processed++;
-
- if ((desc->dataflags & CPMAC_EOQ) != 0) {
- /* The last update to eoq->hw_next didn't happen
- * soon enough, and the receiver stopped here.
- * Remember this descriptor so we can restart
- * the receiver after freeing some space.
- */
- if (unlikely(restart)) {
- if (netif_msg_rx_err(priv))
- netdev_err(priv->dev, "poll found a"
- " duplicate EOQ: %p and %p\n",
- restart, desc);
- goto fatal_error;
- }
-
- restart = desc->next;
- }
-
- skb = cpmac_rx_one(priv, desc);
- if (likely(skb)) {
- netif_receive_skb(skb);
- received++;
- }
- desc = desc->next;
- }
-
- if (desc != priv->rx_head) {
- /* We freed some buffers, but not the whole ring,
- * add what we did free to the rx list
- */
- desc->prev->hw_next = (u32)0;
- priv->rx_head->prev->hw_next = priv->rx_head->mapping;
- }
-
- /* Optimization: If we did not actually process an EOQ (perhaps because
- * of quota limits), check to see if the tail of the queue has EOQ set.
- * We should immediately restart in that case so that the receiver can
- * restart and run in parallel with more packet processing.
- * This lets us handle slightly larger bursts before running
- * out of ring space (assuming dev->weight < ring_size)
- */
-
- if (!restart &&
- (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
- == CPMAC_EOQ &&
- (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
- /* reset EOQ so the poll loop (above) doesn't try to
- * restart this when it eventually gets to this descriptor.
- */
- priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
- restart = priv->rx_head;
- }
-
- if (restart) {
- priv->dev->stats.rx_errors++;
- priv->dev->stats.rx_fifo_errors++;
- if (netif_msg_rx_err(priv) && net_ratelimit())
- netdev_warn(priv->dev, "rx dma ring overrun\n");
-
- if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
- if (netif_msg_drv(priv))
- netdev_err(priv->dev, "cpmac_poll is trying "
- "to restart rx from a descriptor "
- "that's not free: %p\n", restart);
- goto fatal_error;
- }
-
- cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
- }
-
- priv->rx_head = desc;
- spin_unlock(&priv->rx_lock);
- if (unlikely(netif_msg_rx_status(priv)))
- netdev_dbg(priv->dev, "poll processed %d packets\n", received);
-
- if (processed == 0) {
- /* we ran out of packets to read,
- * revert to interrupt-driven mode
- */
- napi_complete(napi);
- cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
- return 0;
- }
-
- return 1;
-
-fatal_error:
- /* Something went horribly wrong.
- * Reset hardware to try to recover rather than wedging.
- */
- if (netif_msg_drv(priv)) {
- netdev_err(priv->dev, "cpmac_poll is confused. "
- "Resetting hardware\n");
- cpmac_dump_all_desc(priv->dev);
- netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
- cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
- cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
- }
-
- spin_unlock(&priv->rx_lock);
- napi_complete(napi);
- netif_tx_stop_all_queues(priv->dev);
- napi_disable(&priv->napi);
-
- atomic_inc(&priv->reset_pending);
- cpmac_hw_stop(priv->dev);
- if (!schedule_work(&priv->reset_work))
- atomic_dec(&priv->reset_pending);
-
- return 0;
-
-}
-
-static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- int queue;
- unsigned int len;
- struct cpmac_desc *desc;
- struct cpmac_priv *priv = netdev_priv(dev);
-
- if (unlikely(atomic_read(&priv->reset_pending)))
- return NETDEV_TX_BUSY;
-
- if (unlikely(skb_padto(skb, ETH_ZLEN)))
- return NETDEV_TX_OK;
-
- len = max_t(unsigned int, skb->len, ETH_ZLEN);
- queue = skb_get_queue_mapping(skb);
- netif_stop_subqueue(dev, queue);
-
- desc = &priv->desc_ring[queue];
- if (unlikely(desc->dataflags & CPMAC_OWN)) {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_warn(dev, "tx dma ring full\n");
-
- return NETDEV_TX_BUSY;
- }
-
- spin_lock(&priv->lock);
- spin_unlock(&priv->lock);
- desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
- desc->skb = skb;
- desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
- DMA_TO_DEVICE);
- desc->hw_data = (u32)desc->data_mapping;
- desc->datalen = len;
- desc->buflen = len;
- if (unlikely(netif_msg_tx_queued(priv)))
- netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
- if (unlikely(netif_msg_hw(priv)))
- cpmac_dump_desc(dev, desc);
- if (unlikely(netif_msg_pktdata(priv)))
- cpmac_dump_skb(dev, skb);
- cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
-
- return NETDEV_TX_OK;
-}
-
-static void cpmac_end_xmit(struct net_device *dev, int queue)
-{
- struct cpmac_desc *desc;
- struct cpmac_priv *priv = netdev_priv(dev);
-
- desc = &priv->desc_ring[queue];
- cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
- if (likely(desc->skb)) {
- spin_lock(&priv->lock);
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += desc->skb->len;
- spin_unlock(&priv->lock);
- dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
- DMA_TO_DEVICE);
-
- if (unlikely(netif_msg_tx_done(priv)))
- netdev_dbg(dev, "sent 0x%p, len=%d\n",
- desc->skb, desc->skb->len);
-
- dev_consume_skb_irq(desc->skb);
- desc->skb = NULL;
- if (__netif_subqueue_stopped(dev, queue))
- netif_wake_subqueue(dev, queue);
- } else {
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_warn(dev, "end_xmit: spurious interrupt\n");
- if (__netif_subqueue_stopped(dev, queue))
- netif_wake_subqueue(dev, queue);
- }
-}
-
-static void cpmac_hw_stop(struct net_device *dev)
-{
- int i;
- struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
-
- ar7_device_reset(pdata->reset_bit);
- cpmac_write(priv->regs, CPMAC_RX_CONTROL,
- cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
- cpmac_write(priv->regs, CPMAC_TX_CONTROL,
- cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
- for (i = 0; i < 8; i++) {
- cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
- cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
- }
- cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
- cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
-}
-
-static void cpmac_hw_start(struct net_device *dev)
-{
- int i;
- struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
-
- ar7_device_reset(pdata->reset_bit);
- for (i = 0; i < 8; i++) {
- cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
- cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
- }
- cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
-
- cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
- MBP_RXMCAST);
- cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
- for (i = 0; i < 8; i++)
- cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
- cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
- cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
- (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
- (dev->dev_addr[3] << 24));
- cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
- cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
- cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
- cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
- cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
- cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
-
- cpmac_write(priv->regs, CPMAC_RX_CONTROL,
- cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
- cpmac_write(priv->regs, CPMAC_TX_CONTROL,
- cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
- cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
- cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
- MAC_FDX);
-}
-
-static void cpmac_clear_rx(struct net_device *dev)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
- struct cpmac_desc *desc;
- int i;
-
- if (unlikely(!priv->rx_head))
- return;
- desc = priv->rx_head;
- for (i = 0; i < priv->ring_size; i++) {
- if ((desc->dataflags & CPMAC_OWN) == 0) {
- if (netif_msg_rx_err(priv) && net_ratelimit())
- netdev_warn(dev, "packet dropped\n");
- if (unlikely(netif_msg_hw(priv)))
- cpmac_dump_desc(dev, desc);
- desc->dataflags = CPMAC_OWN;
- dev->stats.rx_dropped++;
- }
- desc->hw_next = desc->next->mapping;
- desc = desc->next;
- }
- priv->rx_head->prev->hw_next = 0;
-}
-
-static void cpmac_clear_tx(struct net_device *dev)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
- int i;
-
- if (unlikely(!priv->desc_ring))
- return;
- for (i = 0; i < CPMAC_QUEUES; i++) {
- priv->desc_ring[i].dataflags = 0;
- if (priv->desc_ring[i].skb) {
- dev_kfree_skb_any(priv->desc_ring[i].skb);
- priv->desc_ring[i].skb = NULL;
- }
- }
-}
-
-static void cpmac_hw_error(struct work_struct *work)
-{
- struct cpmac_priv *priv =
- container_of(work, struct cpmac_priv, reset_work);
-
- spin_lock(&priv->rx_lock);
- cpmac_clear_rx(priv->dev);
- spin_unlock(&priv->rx_lock);
- cpmac_clear_tx(priv->dev);
- cpmac_hw_start(priv->dev);
- barrier();
- atomic_dec(&priv->reset_pending);
-
- netif_tx_wake_all_queues(priv->dev);
- cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
-}
-
-static void cpmac_check_status(struct net_device *dev)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
- int rx_channel = (macstatus >> 8) & 7;
- int rx_code = (macstatus >> 12) & 15;
- int tx_channel = (macstatus >> 16) & 7;
- int tx_code = (macstatus >> 20) & 15;
-
- if (rx_code || tx_code) {
- if (netif_msg_drv(priv) && net_ratelimit()) {
- /* Can't find any documentation on what these
- * error codes actually are. So just log them and hope..
- */
- if (rx_code)
- netdev_warn(dev, "host error %d on rx "
- "channel %d (macstatus %08x), resetting\n",
- rx_code, rx_channel, macstatus);
- if (tx_code)
- netdev_warn(dev, "host error %d on tx "
- "channel %d (macstatus %08x), resetting\n",
- tx_code, tx_channel, macstatus);
- }
-
- netif_tx_stop_all_queues(dev);
- cpmac_hw_stop(dev);
- if (schedule_work(&priv->reset_work))
- atomic_inc(&priv->reset_pending);
- if (unlikely(netif_msg_hw(priv)))
- cpmac_dump_regs(dev);
- }
- cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
-}
-
-static irqreturn_t cpmac_irq(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct cpmac_priv *priv;
- int queue;
- u32 status;
-
- priv = netdev_priv(dev);
-
- status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
-
- if (unlikely(netif_msg_intr(priv)))
- netdev_dbg(dev, "interrupt status: 0x%08x\n", status);
-
- if (status & MAC_INT_TX)
- cpmac_end_xmit(dev, (status & 7));
-
- if (status & MAC_INT_RX) {
- queue = (status >> 8) & 7;
- if (napi_schedule_prep(&priv->napi)) {
- cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
- __napi_schedule(&priv->napi);
- }
- }
-
- cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
-
- if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
- cpmac_check_status(dev);
-
- return IRQ_HANDLED;
-}
-
-static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- spin_lock(&priv->lock);
- dev->stats.tx_errors++;
- spin_unlock(&priv->lock);
- if (netif_msg_tx_err(priv) && net_ratelimit())
- netdev_warn(dev, "transmit timeout\n");
-
- atomic_inc(&priv->reset_pending);
- barrier();
- cpmac_clear_tx(dev);
- barrier();
- atomic_dec(&priv->reset_pending);
-
- netif_tx_wake_all_queues(priv->dev);
-}
-
-static void cpmac_get_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring,
- struct kernel_ethtool_ringparam *kernel_ring,
- struct netlink_ext_ack *extack)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- ring->rx_max_pending = 1024;
- ring->rx_mini_max_pending = 1;
- ring->rx_jumbo_max_pending = 1;
- ring->tx_max_pending = 1;
-
- ring->rx_pending = priv->ring_size;
- ring->rx_mini_pending = 1;
- ring->rx_jumbo_pending = 1;
- ring->tx_pending = 1;
-}
-
-static int cpmac_set_ringparam(struct net_device *dev,
- struct ethtool_ringparam *ring,
- struct kernel_ethtool_ringparam *kernel_ring,
- struct netlink_ext_ack *extack)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- if (netif_running(dev))
- return -EBUSY;
- priv->ring_size = ring->rx_pending;
-
- return 0;
-}
-
-static void cpmac_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
-{
- strscpy(info->driver, "cpmac", sizeof(info->driver));
- strscpy(info->version, CPMAC_VERSION, sizeof(info->version));
- snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
-}
-
-static const struct ethtool_ops cpmac_ethtool_ops = {
- .get_drvinfo = cpmac_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_ringparam = cpmac_get_ringparam,
- .set_ringparam = cpmac_set_ringparam,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static void cpmac_adjust_link(struct net_device *dev)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
- int new_state = 0;
-
- spin_lock(&priv->lock);
- if (dev->phydev->link) {
- netif_tx_start_all_queues(dev);
- if (dev->phydev->duplex != priv->oldduplex) {
- new_state = 1;
- priv->oldduplex = dev->phydev->duplex;
- }
-
- if (dev->phydev->speed != priv->oldspeed) {
- new_state = 1;
- priv->oldspeed = dev->phydev->speed;
- }
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv) && net_ratelimit())
- phy_print_status(dev->phydev);
-
- spin_unlock(&priv->lock);
-}
-
-static int cpmac_open(struct net_device *dev)
-{
- int i, size, res;
- struct cpmac_priv *priv = netdev_priv(dev);
- struct resource *mem;
- struct cpmac_desc *desc;
- struct sk_buff *skb;
-
- mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
- if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
- if (netif_msg_drv(priv))
- netdev_err(dev, "failed to request registers\n");
-
- res = -ENXIO;
- goto fail_reserve;
- }
-
- priv->regs = ioremap(mem->start, resource_size(mem));
- if (!priv->regs) {
- if (netif_msg_drv(priv))
- netdev_err(dev, "failed to remap registers\n");
-
- res = -ENXIO;
- goto fail_remap;
- }
-
- size = priv->ring_size + CPMAC_QUEUES;
- priv->desc_ring = dma_alloc_coherent(&dev->dev,
- sizeof(struct cpmac_desc) * size,
- &priv->dma_ring,
- GFP_KERNEL);
- if (!priv->desc_ring) {
- res = -ENOMEM;
- goto fail_alloc;
- }
-
- for (i = 0; i < size; i++)
- priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
-
- priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
- for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
- skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
- if (unlikely(!skb)) {
- res = -ENOMEM;
- goto fail_desc;
- }
- desc->skb = skb;
- desc->data_mapping = dma_map_single(&dev->dev, skb->data,
- CPMAC_SKB_SIZE,
- DMA_FROM_DEVICE);
- desc->hw_data = (u32)desc->data_mapping;
- desc->buflen = CPMAC_SKB_SIZE;
- desc->dataflags = CPMAC_OWN;
- desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
- desc->next->prev = desc;
- desc->hw_next = (u32)desc->next->mapping;
- }
-
- priv->rx_head->prev->hw_next = (u32)0;
-
- res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
- if (res) {
- if (netif_msg_drv(priv))
- netdev_err(dev, "failed to obtain irq\n");
-
- goto fail_irq;
- }
-
- atomic_set(&priv->reset_pending, 0);
- INIT_WORK(&priv->reset_work, cpmac_hw_error);
- cpmac_hw_start(dev);
-
- napi_enable(&priv->napi);
- phy_start(dev->phydev);
-
- return 0;
-
-fail_irq:
-fail_desc:
- for (i = 0; i < priv->ring_size; i++) {
- if (priv->rx_head[i].skb) {
- dma_unmap_single(&dev->dev,
- priv->rx_head[i].data_mapping,
- CPMAC_SKB_SIZE,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_head[i].skb);
- }
- }
- dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
- priv->desc_ring, priv->dma_ring);
-
-fail_alloc:
- iounmap(priv->regs);
-
-fail_remap:
- release_mem_region(mem->start, resource_size(mem));
-
-fail_reserve:
- return res;
-}
-
-static int cpmac_stop(struct net_device *dev)
-{
- int i;
- struct cpmac_priv *priv = netdev_priv(dev);
- struct resource *mem;
-
- netif_tx_stop_all_queues(dev);
-
- cancel_work_sync(&priv->reset_work);
- napi_disable(&priv->napi);
- phy_stop(dev->phydev);
-
- cpmac_hw_stop(dev);
-
- for (i = 0; i < 8; i++)
- cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
- cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
- cpmac_write(priv->regs, CPMAC_MBP, 0);
-
- free_irq(dev->irq, dev);
- iounmap(priv->regs);
- mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
- release_mem_region(mem->start, resource_size(mem));
- priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
- for (i = 0; i < priv->ring_size; i++) {
- if (priv->rx_head[i].skb) {
- dma_unmap_single(&dev->dev,
- priv->rx_head[i].data_mapping,
- CPMAC_SKB_SIZE,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_head[i].skb);
- }
- }
-
- dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
- (CPMAC_QUEUES + priv->ring_size),
- priv->desc_ring, priv->dma_ring);
-
- return 0;
-}
-
-static const struct net_device_ops cpmac_netdev_ops = {
- .ndo_open = cpmac_open,
- .ndo_stop = cpmac_stop,
- .ndo_start_xmit = cpmac_start_xmit,
- .ndo_tx_timeout = cpmac_tx_timeout,
- .ndo_set_rx_mode = cpmac_set_multicast_list,
- .ndo_eth_ioctl = phy_do_ioctl_running,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
-};
-
-static int external_switch;
-
-static int cpmac_probe(struct platform_device *pdev)
-{
- int rc, phy_id;
- char mdio_bus_id[MII_BUS_ID_SIZE];
- struct resource *mem;
- struct cpmac_priv *priv;
- struct net_device *dev;
- struct plat_cpmac_data *pdata;
- struct phy_device *phydev = NULL;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- if (external_switch || dumb_switch) {
- strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
- phy_id = pdev->id;
- } else {
- for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
- if (!(pdata->phy_mask & (1 << phy_id)))
- continue;
- if (!mdiobus_get_phy(cpmac_mii, phy_id))
- continue;
- strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
- break;
- }
- }
-
- if (phy_id == PHY_MAX_ADDR) {
- dev_err(&pdev->dev, "no PHY present, falling back "
- "to switch on MDIO bus 0\n");
- strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
- phy_id = pdev->id;
- }
- mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';
-
- dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
- if (!dev)
- return -ENOMEM;
-
- SET_NETDEV_DEV(dev, &pdev->dev);
- platform_set_drvdata(pdev, dev);
- priv = netdev_priv(dev);
-
- priv->pdev = pdev;
- mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- if (!mem) {
- rc = -ENODEV;
- goto fail;
- }
-
- dev->irq = platform_get_irq_byname(pdev, "irq");
-
- dev->netdev_ops = &cpmac_netdev_ops;
- dev->ethtool_ops = &cpmac_ethtool_ops;
-
- netif_napi_add(dev, &priv->napi, cpmac_poll);
-
- spin_lock_init(&priv->lock);
- spin_lock_init(&priv->rx_lock);
- priv->dev = dev;
- priv->ring_size = 64;
- priv->msg_enable = netif_msg_init(debug_level, 0xff);
- eth_hw_addr_set(dev, pdata->dev_addr);
-
- snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
- mdio_bus_id, phy_id);
-
- phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
-
- if (IS_ERR(phydev)) {
- if (netif_msg_drv(priv))
- dev_err(&pdev->dev, "Could not attach to PHY\n");
-
- rc = PTR_ERR(phydev);
- goto fail;
- }
-
- rc = register_netdev(dev);
- if (rc) {
- dev_err(&pdev->dev, "Could not register net device\n");
- goto fail;
- }
-
- if (netif_msg_probe(priv)) {
- dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
- "mac: %pM\n", (void *)mem->start, dev->irq,
- priv->phy_name, dev->dev_addr);
- }
-
- return 0;
-
-fail:
- free_netdev(dev);
- return rc;
-}
-
-static int cpmac_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
-
- unregister_netdev(dev);
- free_netdev(dev);
-
- return 0;
-}
-
-static struct platform_driver cpmac_driver = {
- .driver = {
- .name = "cpmac",
- },
- .probe = cpmac_probe,
- .remove = cpmac_remove,
-};
-
-int __init cpmac_init(void)
-{
- u32 mask;
- int i, res;
-
- cpmac_mii = mdiobus_alloc();
- if (cpmac_mii == NULL)
- return -ENOMEM;
-
- cpmac_mii->name = "cpmac-mii";
- cpmac_mii->read = cpmac_mdio_read;
- cpmac_mii->write = cpmac_mdio_write;
- cpmac_mii->reset = cpmac_mdio_reset;
-
- cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
-
- if (!cpmac_mii->priv) {
- pr_err("Can't ioremap mdio registers\n");
- res = -ENXIO;
- goto fail_alloc;
- }
-
- /* FIXME: unhardcode gpio&reset bits */
- ar7_gpio_disable(26);
- ar7_gpio_disable(27);
- ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
- ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
- ar7_device_reset(AR7_RESET_BIT_EPHY);
-
- cpmac_mii->reset(cpmac_mii);
-
- for (i = 0; i < 300; i++) {
- mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
- if (mask)
- break;
- else
- msleep(10);
- }
-
- mask &= 0x7fffffff;
- if (mask & (mask - 1)) {
- external_switch = 1;
- mask = 0;
- }
-
- cpmac_mii->phy_mask = ~(mask | 0x80000000);
- snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
-
- res = mdiobus_register(cpmac_mii);
- if (res)
- goto fail_mii;
-
- res = platform_driver_register(&cpmac_driver);
- if (res)
- goto fail_cpmac;
-
- return 0;
-
-fail_cpmac:
- mdiobus_unregister(cpmac_mii);
-
-fail_mii:
- iounmap(cpmac_mii->priv);
-
-fail_alloc:
- mdiobus_free(cpmac_mii);
-
- return res;
-}
-
-void __exit cpmac_exit(void)
-{
- platform_driver_unregister(&cpmac_driver);
- mdiobus_unregister(cpmac_mii);
- iounmap(cpmac_mii->priv);
- mdiobus_free(cpmac_mii);
-}
-
-module_init(cpmac_init);
-module_exit(cpmac_exit);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 0ec85635dfd6..764ed298b570 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1360,7 +1360,7 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
* particular hardware is sharing a common queue, so the
* incoming device might change per packet.
*/
- xdp_do_flush_map();
+ xdp_do_flush();
break;
default:
bpf_warn_invalid_xdp_action(ndev, prog, act);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2eb9d5a32588..b0950a318c42 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/semaphore.h>
#include <linux/phy.h>
@@ -47,10 +48,7 @@
#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
#include <linux/of_mdio.h>
-#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/mfd/syscon.h>
@@ -1726,13 +1724,10 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
-static const struct of_device_id davinci_emac_of_match[];
-
static struct emac_platform_data *
davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
{
struct device_node *np;
- const struct of_device_id *match;
const struct emac_platform_data *auxdata;
struct emac_platform_data *pdata = NULL;
@@ -1779,9 +1774,8 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
pdata->interrupt_disable = auxdata->interrupt_disable;
}
- match = of_match_device(davinci_emac_of_match, &pdev->dev);
- if (match && match->data) {
- auxdata = match->data;
+ auxdata = device_get_match_data(&pdev->dev);
+ if (auxdata) {
pdata->version = auxdata->version;
pdata->hw_ram_addr = auxdata->hw_ram_addr;
}
@@ -1934,18 +1928,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
goto err_free_rxchan;
ndev->irq = rc;
- rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
- if (!rc)
- eth_hw_addr_set(ndev, priv->mac_addr);
-
+	/* If no valid MAC address was provided, read it from the SoC registers */
if (!is_valid_ether_addr(priv->mac_addr)) {
- /* Use random MAC if still none obtained. */
- eth_hw_addr_random(ndev);
- memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
- dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
- priv->mac_addr);
+ rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
+ if (!rc)
+ eth_hw_addr_set(ndev, priv->mac_addr);
+
+ if (!is_valid_ether_addr(priv->mac_addr)) {
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
}
-
ndev->netdev_ops = &emac_netdev_ops;
ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll);
@@ -2002,7 +1998,7 @@ err_free_netdev:
* Called when removing the device driver. We disable clock usage and release
* the resources taken up by the driver and unregister network device
*/
-static int davinci_emac_remove(struct platform_device *pdev)
+static void davinci_emac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct emac_priv *priv = netdev_priv(ndev);
@@ -2022,8 +2018,6 @@ static int davinci_emac_remove(struct platform_device *pdev)
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
free_netdev(ndev);
-
- return 0;
}
static int davinci_emac_suspend(struct device *dev)
@@ -2076,7 +2070,7 @@ static struct platform_driver davinci_emac_driver = {
.of_match_table = davinci_emac_of_match,
},
.probe = davinci_emac_probe,
- .remove = davinci_emac_remove,
+ .remove_new = davinci_emac_remove,
};
/**
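The .remove -> .remove_new conversions in this run of hunks (davinci_emac above, then davinci_mdio, netcp, tsi108, via-rhine and via-velocity below) all follow the same pattern; a minimal sketch with an invented driver name, not code from this patch:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct net_device *ndev = alloc_etherdev(0);
	int err;

	if (!ndev)
		return -ENOMEM;
	platform_set_drvdata(pdev, ndev);
	err = register_netdev(ndev);
	if (err)
		free_netdev(ndev);
	return err;
}

/* New-style remove callback: returns void, because the driver core has
 * nothing useful to do with an error at remove time anyway. */
static void example_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
}

static struct platform_driver example_driver = {
	.probe		= example_probe,
	.remove_new	= example_remove,	/* was: .remove, returning int */
	.driver		= {
		.name	= "example-eth",
	},
};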
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 89b6d23e9937..628c87dc1d28 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -673,7 +673,7 @@ bail_out:
return ret;
}
-static int davinci_mdio_remove(struct platform_device *pdev)
+static void davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
@@ -686,8 +686,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM
@@ -766,7 +764,7 @@ static struct platform_driver davinci_mdio_driver = {
.of_match_table = of_match_ptr(davinci_mdio_of_mtable),
},
.probe = davinci_mdio_probe,
- .remove = davinci_mdio_remove,
+ .remove_new = davinci_mdio_remove,
};
static int __init davinci_mdio_init(void)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index b272361e378f..99de8a40ed60 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -433,6 +433,17 @@ int emac_set_port_state(struct prueth_emac *emac,
return ret;
}
+void icssg_config_half_duplex(struct prueth_emac *emac)
+{
+ u32 val;
+
+ if (!emac->half_duplex)
+ return;
+
+ val = get_random_u32();
+ writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
+}
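A brief annotation for icssg_config_half_duplex() above; the interpretation is inferred from the register name only and is not stated in this patch:

/* HD_RAND_SEED_OFFSET appears to hold the seed for the firmware's
 * half-duplex (CSMA/CD) backoff randomizer, hence a fresh
 * get_random_u32() value is written each time the link comes up in
 * half duplex (see the emac_adjust_link() hunk further down). */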
+
void icssg_config_set_speed(struct prueth_emac *emac)
{
u8 fw_speed;
@@ -453,5 +464,8 @@ void icssg_config_set_speed(struct prueth_emac *emac)
return;
}
+ if (emac->duplex == DUPLEX_HALF)
+ fw_speed |= FW_LINK_SPEED_HD;
+
writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 4914d0ef58e9..6c4b64227ac8 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -19,11 +19,11 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
@@ -1029,6 +1029,8 @@ static void emac_adjust_link(struct net_device *ndev)
* values
*/
if (emac->link) {
+ if (emac->duplex == DUPLEX_HALF)
+ icssg_config_half_duplex(emac);
/* Set the RGMII cfg for gig en and full duplex */
icssg_update_rgmii_cfg(prueth->miig_rt, emac);
@@ -1147,9 +1149,13 @@ static int emac_phy_connect(struct prueth_emac *emac)
return -ENODEV;
}
+ if (!emac->half_duplex) {
+ dev_dbg(prueth->dev, "half duplex mode is not supported\n");
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
+
/* remove unsupported modes */
- phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
@@ -1653,6 +1659,19 @@ static void emac_ndo_get_stats64(struct net_device *ndev,
stats->tx_dropped = ndev->stats.tx_dropped;
}
+static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ ret = snprintf(name, len, "p%d", emac->port_id);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
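For reference, the ret >= len test above relies on the usual snprintf() contract; a short worked illustration with made-up values:

/* snprintf() returns the length the formatted name would need,
 * excluding the terminating NUL.  For port_id = 10 the name is "p10"
 * (3 characters); if the caller passes len = 3 there is no room for
 * the NUL, snprintf() returns 3, ret >= len is true and the callback
 * returns -EINVAL instead of handing back a truncated name. */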
+
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
@@ -1663,6 +1682,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_set_rx_mode = emac_ndo_set_rx_mode,
.ndo_eth_ioctl = emac_ndo_ioctl,
.ndo_get_stats64 = emac_ndo_get_stats64,
+ .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
};
/* get emac_port corresponding to eth_node name */
@@ -1928,8 +1948,6 @@ static void prueth_put_cores(struct prueth *prueth, int slice)
pru_rproc_put(prueth->pru[slice]);
}
-static const struct of_device_id prueth_dt_match[];
-
static int prueth_probe(struct platform_device *pdev)
{
struct device_node *eth_node, *eth_ports_node;
@@ -1938,7 +1956,6 @@ static int prueth_probe(struct platform_device *pdev)
struct genpool_data_align gp_data = {
.align = SZ_64K,
};
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct device_node *np;
struct prueth *prueth;
@@ -1948,17 +1965,13 @@ static int prueth_probe(struct platform_device *pdev)
np = dev->of_node;
- match = of_match_device(prueth_dt_match, dev);
- if (!match)
- return -ENODEV;
-
prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
if (!prueth)
return -ENOMEM;
dev_set_drvdata(dev, prueth);
prueth->pdev = pdev;
- prueth->pdata = *(const struct prueth_pdata *)match->data;
+ prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
prueth->dev = dev;
eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
@@ -2113,6 +2126,10 @@ static int prueth_probe(struct platform_device *pdev)
eth0_node->name);
goto exit_iep;
}
+
+ if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+
prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
}
@@ -2124,6 +2141,9 @@ static int prueth_probe(struct platform_device *pdev)
goto netdev_exit;
}
+ if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+
prueth->emac[PRUETH_MAC1]->iep = prueth->iep0;
}
@@ -2313,8 +2333,13 @@ static const struct prueth_pdata am654_icssg_pdata = {
.quirk_10m_link_issue = 1,
};
+static const struct prueth_pdata am64x_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
static const struct of_device_id prueth_dt_match[] = {
{ .compatible = "ti,am654-icssg-prueth", .data = &am654_icssg_pdata },
+ { .compatible = "ti,am642-icssg-prueth", .data = &am64x_icssg_pdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, prueth_dt_match);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 3fe80a8758d3..8b6d6b497010 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -145,6 +145,7 @@ struct prueth_emac {
struct icss_iep *iep;
unsigned int rx_ts_enabled : 1;
unsigned int tx_ts_enabled : 1;
+ unsigned int half_duplex : 1;
/* DMA related */
struct prueth_tx_chn tx_chns[PRUETH_MAX_TX_QUEUES];
@@ -271,6 +272,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac,
int emac_set_port_state(struct prueth_emac *emac,
enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
+void icssg_config_half_duplex(struct prueth_emac *emac);
/* Buffer queue helpers */
int icssg_queue_pop(struct prueth *prueth, u8 queue);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index d829113c16ee..11b90e1da0c6 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2228,7 +2228,7 @@ probe_quit:
return ret;
}
-static int netcp_remove(struct platform_device *pdev)
+static void netcp_remove(struct platform_device *pdev)
{
struct netcp_device *netcp_device = platform_get_drvdata(pdev);
struct netcp_intf *netcp_intf, *netcp_tmp;
@@ -2256,7 +2256,6 @@ static int netcp_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_set_drvdata(pdev, NULL);
- return 0;
}
static const struct of_device_id of_match[] = {
@@ -2271,7 +2270,7 @@ static struct platform_driver netcp_driver = {
.of_match_table = of_match,
},
.probe = netcp_probe,
- .remove = netcp_remove,
+ .remove_new = netcp_remove,
};
module_platform_driver(netcp_driver);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2adf82a32bf6..02cb6474f6dc 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1735,8 +1735,8 @@ static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
static void keystone_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
- strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
}
static u32 keystone_get_msglevel(struct net_device *ndev)
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 50d7eacfec58..87e67121477c 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -2332,7 +2332,7 @@ spider_net_alloc_card(void)
struct spider_net_card *card;
netdev = alloc_etherdev(struct_size(card, darray,
- tx_descriptors + rx_descriptors));
+ size_add(tx_descriptors, rx_descriptors)));
if (!netdev)
return NULL;
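For context on the size_add() change above: size_add(), like struct_size(), comes from <linux/overflow.h> and saturates to SIZE_MAX instead of wrapping, so an absurd descriptor count makes the allocation fail rather than silently produce an undersized buffer. A minimal sketch, independent of the spider_net driver:

#include <linux/overflow.h>

/* Returns SIZE_MAX if tx + rx would overflow size_t; an allocation of
 * SIZE_MAX bytes then fails cleanly instead of being too small. */
static size_t example_total_descs(size_t tx, size_t rx)
{
	return size_add(tx, rx);
}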
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 14cf6ecf6d0d..6e3758dfbdbd 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1434,14 +1434,10 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
u32 dmactl = tc_readl(&tr->DMA_Ctl);
if (!(dmactl & DMA_IntMask)) {
- /* disable interrupts */
- tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
- if (napi_schedule_prep(&lp->napi))
+ if (napi_schedule_prep(&lp->napi)) {
+ /* disable interrupts */
+ tc_writel(dmactl | DMA_IntMask, &tr->DMA_Ctl);
__napi_schedule(&lp->napi);
- else {
- printk(KERN_ERR "%s: interrupt taken in poll\n",
- dev->name);
- BUG();
}
(void)tc_readl(&tr->Int_Src); /* flush */
return IRQ_HANDLED;
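A note on the reordering above; the reasoning is inferred from the surrounding code rather than spelled out in the patch:

/* Masking the DMA interrupts only after napi_schedule_prep() succeeds
 * means the mask is applied exactly when a poll has been scheduled to
 * undo it later.  If the poll is already pending (or NAPI is disabled)
 * the interrupt is simply acknowledged, so the old else branch with
 * printk() + BUG() is no longer needed. */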
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index d09d352e1c0a..554aff7c8f3b 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1660,7 +1660,7 @@ static void tsi108_timed_checker(struct timer_list *t)
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
-static int tsi108_ether_remove(struct platform_device *pdev)
+static void tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct tsi108_prv_data *priv = netdev_priv(dev);
@@ -1670,15 +1670,13 @@ static int tsi108_ether_remove(struct platform_device *pdev)
iounmap(priv->regs);
iounmap(priv->phyregs);
free_netdev(dev);
-
- return 0;
}
/* Structure for a device driver */
static struct platform_driver tsi_eth_driver = {
.probe = tsi108_init_one,
- .remove = tsi108_ether_remove,
+ .remove_new = tsi108_ether_remove,
.driver = {
.name = "tsi-ethernet",
},
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 3e09e5036490..e80c02948801 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2443,7 +2443,7 @@ static void rhine_remove_one_pci(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int rhine_remove_one_platform(struct platform_device *pdev)
+static void rhine_remove_one_platform(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2453,8 +2453,6 @@ static int rhine_remove_one_platform(struct platform_device *pdev)
iounmap(rp->base);
free_netdev(dev);
-
- return 0;
}
static void rhine_shutdown_pci(struct pci_dev *pdev)
@@ -2572,7 +2570,7 @@ static struct pci_driver rhine_driver_pci = {
static struct platform_driver rhine_driver_platform = {
.probe = rhine_init_one_platform,
- .remove = rhine_remove_one_platform,
+ .remove_new = rhine_remove_one_platform,
.driver = {
.name = DRV_NAME,
.of_match_table = rhine_of_tbl,
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 731f689412e6..1c6b2a9bba08 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2957,11 +2957,9 @@ static int velocity_platform_probe(struct platform_device *pdev)
return velocity_probe(&pdev->dev, irq, info, BUS_PLATFORM);
}
-static int velocity_platform_remove(struct platform_device *pdev)
+static void velocity_platform_remove(struct platform_device *pdev)
{
velocity_remove(&pdev->dev);
-
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -3249,7 +3247,7 @@ static struct pci_driver velocity_pci_driver = {
static struct platform_driver velocity_platform_driver = {
.probe = velocity_platform_probe,
- .remove = velocity_platform_remove,
+ .remove_new = velocity_platform_remove,
.driver = {
.name = "via-velocity",
.of_match_table = velocity_of_ids,
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index 93cb6f2294e7..ddc5f6d20b9c 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -3,9 +3,171 @@
#include <linux/pci.h>
#include <linux/phy.h>
+#include <linux/ethtool.h>
#include "wx_type.h"
#include "wx_ethtool.h"
+#include "wx_hw.h"
+
+struct wx_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ size_t sizeof_stat;
+ off_t stat_offset;
+};
+
+#define WX_STAT(str, m) { \
+ .stat_string = str, \
+ .sizeof_stat = sizeof(((struct wx *)0)->m), \
+ .stat_offset = offsetof(struct wx, m) }
+
+static const struct wx_stats wx_gstrings_stats[] = {
+ WX_STAT("rx_dma_pkts", stats.gprc),
+ WX_STAT("tx_dma_pkts", stats.gptc),
+ WX_STAT("rx_dma_bytes", stats.gorc),
+ WX_STAT("tx_dma_bytes", stats.gotc),
+ WX_STAT("rx_total_pkts", stats.tpr),
+ WX_STAT("tx_total_pkts", stats.tpt),
+ WX_STAT("rx_long_length_count", stats.roc),
+ WX_STAT("rx_short_length_count", stats.ruc),
+ WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ WX_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ WX_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ WX_STAT("rx_no_dma_resources", stats.rdmdrop),
+ WX_STAT("tx_busy", tx_busy),
+ WX_STAT("non_eop_descs", non_eop_descs),
+ WX_STAT("tx_restart_queue", restart_queue),
+ WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good),
+ WX_STAT("rx_csum_offload_errors", hw_csum_rx_error),
+ WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
+};
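WX_STAT() records each counter's name together with the field's size and offset inside struct wx, and wx_get_ethtool_stats() further down resolves every entry with plain pointer arithmetic. A self-contained sketch of the same table-driven pattern, with invented struct and field names:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_priv {
	u64 rx_pkts;
	u32 resets;
};

struct example_stat {
	const char *name;
	size_t size;
	off_t offset;
};

#define EXAMPLE_STAT(n, m) \
	{ .name = n, .size = sizeof_field(struct example_priv, m), \
	  .offset = offsetof(struct example_priv, m) }

static const struct example_stat example_stats[] = {
	EXAMPLE_STAT("rx_pkts", rx_pkts),
	EXAMPLE_STAT("resets", resets),
};

/* Reads entry i from an example_priv instance, picking the load width
 * from the recorded field size, exactly as wx_get_ethtool_stats() does. */
static u64 example_read_stat(const struct example_priv *p, int i)
{
	const char *base = (const char *)p + example_stats[i].offset;

	return example_stats[i].size == sizeof(u64) ?
	       *(const u64 *)base : *(const u32 *)base;
}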
+
+/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so
+ * WX_NUM_RX_QUEUES is defined to evaluate to num_tx_queues. This is
+ * done because there is no good way to get the max number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#define WX_NUM_RX_QUEUES netdev->num_tx_queues
+#define WX_NUM_TX_QUEUES netdev->num_tx_queues
+
+#define WX_QUEUE_STATS_LEN ( \
+ (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
+ (sizeof(struct wx_queue_stats) / sizeof(u64)))
+#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
+#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
+
+int wx_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return WX_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL(wx_get_sset_count);
+
+void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string);
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ }
+ for (i = 0; i < WX_NUM_RX_QUEUES; i++) {
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ }
+ break;
+ }
+}
+EXPORT_SYMBOL(wx_get_strings);
+
+void wx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct wx_ring *ring;
+ unsigned int start;
+ int i, j;
+ char *p;
+
+ wx_update_stats(wx);
+
+ for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) {
+ p = (char *)wx + wx_gstrings_stats[i].stat_offset;
+ data[i] = (wx_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+ for (j = 0; j < netdev->num_tx_queues; j++) {
+ ring = wx->tx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+ continue;
+ }
+
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ i += 2;
+ }
+ for (j = 0; j < WX_NUM_RX_QUEUES; j++) {
+ ring = wx->rx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+ continue;
+ }
+
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ i += 2;
+ }
+}
+EXPORT_SYMBOL(wx_get_ethtool_stats);
+
+void wx_get_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct wx_hw_stats *hwstats;
+
+ wx_update_stats(wx);
+
+ hwstats = &wx->stats;
+ mac_stats->MulticastFramesXmittedOK = hwstats->mptc;
+ mac_stats->BroadcastFramesXmittedOK = hwstats->bptc;
+ mac_stats->MulticastFramesReceivedOK = hwstats->mprc;
+ mac_stats->BroadcastFramesReceivedOK = hwstats->bprc;
+}
+EXPORT_SYMBOL(wx_get_mac_stats);
+
+void wx_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct wx_hw_stats *hwstats;
+
+ wx_update_stats(wx);
+
+ hwstats = &wx->stats;
+ stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
+ stats->rx_pause_frames = hwstats->lxonoffrxc;
+}
+EXPORT_SYMBOL(wx_get_pause_stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
@@ -14,5 +176,12 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
+ if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
+ info->n_stats = WX_STATS_LEN -
+ (WX_NUM_TX_QUEUES - wx->num_tx_queues) *
+ (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
+ } else {
+ info->n_stats = WX_STATS_LEN;
+ }
}
EXPORT_SYMBOL(wx_get_drvinfo);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
index e85538c69454..16d1a09369a6 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -4,5 +4,13 @@
#ifndef _WX_ETHTOOL_H_
#define _WX_ETHTOOL_H_
+int wx_get_sset_count(struct net_device *netdev, int sset);
+void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data);
+void wx_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data);
+void wx_get_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats);
+void wx_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 85dc16faca54..a3c5de9d547a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -12,6 +12,98 @@
#include "wx_lib.h"
#include "wx_hw.h"
+static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u32 command, val;
+ int ret;
+
+ /* setup and write the address cycle command */
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+
+ command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
+ if (wx->mac.type == wx_mac_em)
+ command |= WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c22 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, WX_MSCC);
+}
+
+static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ u32 command, val;
+ int ret;
+
+ /* setup and write the address cycle command */
+ command = WX_MSCA_RA(regnum) |
+ WX_MSCA_PA(phy_addr) |
+ WX_MSCA_DA(devnum);
+ wr32(wx, WX_MSCA, command);
+
+ command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
+ if (wx->mac.type == wx_mac_em)
+ command |= WX_MDIO_CLK(6);
+ wr32(wx, WX_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
+ 100000, false, wx, WX_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c22 command did not complete.\n");
+
+ return ret;
+}
+
+int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
+ return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum);
+}
+EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22);
+
+int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
+ return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value);
+}
+EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22);
+
+int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
+ return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum);
+}
+EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45);
+
+int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+
+ wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
+ return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value);
+}
+EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45);
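These four exported helpers make the wx MDIO access path shareable between clause-22 and clause-45 PHYs. A hedged sketch of how a MAC driver might wire them into its MDIO bus; the registering function, bus name and id format are illustrative, not taken from this patch:

#include <linux/phy.h>

#include "wx_type.h"
#include "wx_hw.h"

static int example_mdio_register(struct wx *wx)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&wx->pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "example_mdio";
	bus->priv = wx;
	bus->parent = &wx->pdev->dev;
	bus->read = wx_phy_read_reg_mdi_c22;
	bus->write = wx_phy_write_reg_mdi_c22;
	bus->read_c45 = wx_phy_read_reg_mdi_c45;
	bus->write_c45 = wx_phy_write_reg_mdi_c45;
	snprintf(bus->id, MII_BUS_ID_SIZE, "example-%x", pci_dev_id(wx->pdev));

	return devm_mdiobus_register(&wx->pdev->dev, bus);
}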
+
static void wx_intr_disable(struct wx *wx, u64 qmask)
{
u32 mask;
@@ -1910,6 +2002,105 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);
/**
+ * wx_update_stats - Update the board statistics counters.
+ * @wx: board private structure
+ **/
+void wx_update_stats(struct wx *wx)
+{
+ struct wx_hw_stats *hwstats = &wx->stats;
+
+ u64 non_eop_descs = 0, alloc_rx_buff_failed = 0;
+ u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0;
+ u64 restart_queue = 0, tx_busy = 0;
+ u32 i;
+
+ /* gather some stats to the wx struct that are per queue */
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *rx_ring = wx->rx_ring[i];
+
+ non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+ hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
+ hw_csum_rx_error += rx_ring->rx_stats.csum_err;
+ }
+ wx->non_eop_descs = non_eop_descs;
+ wx->alloc_rx_buff_failed = alloc_rx_buff_failed;
+ wx->hw_csum_rx_error = hw_csum_rx_error;
+ wx->hw_csum_rx_good = hw_csum_rx_good;
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ struct wx_ring *tx_ring = wx->tx_ring[i];
+
+ restart_queue += tx_ring->tx_stats.restart_queue;
+ tx_busy += tx_ring->tx_stats.tx_busy;
+ }
+ wx->restart_queue = restart_queue;
+ wx->tx_busy = tx_busy;
+
+ hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT);
+ hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT);
+ hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB);
+ hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB);
+ hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
+ hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
+ hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
+ hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
+ hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
+ hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
+ hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
+ hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
+ hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
+ hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
+ hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC);
+ hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC);
+ hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);
+ hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT);
+ hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT);
+ hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT);
+ hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
+ hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
+
+ for (i = 0; i < wx->mac.max_rx_queues; i++)
+ hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
+}
+EXPORT_SYMBOL(wx_update_stats);
+
+/**
+ * wx_clear_hw_cntrs - Generic clear hardware counters
+ * @wx: board private structure
+ *
+ * Clears all hardware statistics counters by reading them from the hardware.
+ * Statistics counters are clear on read.
+ **/
+void wx_clear_hw_cntrs(struct wx *wx)
+{
+ u16 i = 0;
+
+ for (i = 0; i < wx->mac.max_rx_queues; i++)
+ wr32(wx, WX_PX_MPRC(i), 0);
+
+ rd32(wx, WX_RDM_PKT_CNT);
+ rd32(wx, WX_TDM_PKT_CNT);
+ rd64(wx, WX_RDM_BYTE_CNT_LSB);
+ rd32(wx, WX_TDM_BYTE_CNT_LSB);
+ rd32(wx, WX_RDM_DRP_PKT);
+ rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
+ rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
+ rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
+ rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
+ rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
+ rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
+ rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
+ rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
+ rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
+ rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
+ rd32(wx, WX_RDB_LXONTXC);
+ rd32(wx, WX_RDB_LXOFFTXC);
+ rd32(wx, WX_MAC_LXONOFFRXC);
+}
+EXPORT_SYMBOL(wx_clear_hw_cntrs);
+
+/**
* wx_start_hw - Prepare hardware for Tx/Rx
* @wx: pointer to hardware structure
*
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 0b3447bc6f2f..12c20a7c364d 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -4,6 +4,13 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_
+#include <linux/phy.h>
+
+int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum);
+int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value);
+int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum);
+int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value);
void wx_intr_enable(struct wx *wx, u64 qmask);
void wx_irq_disable(struct wx *wx);
int wx_check_flash_load(struct wx *wx, u32 check_bit);
@@ -34,5 +41,7 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
int wx_sw_init(struct wx *wx);
int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
+void wx_update_stats(struct wx *wx);
+void wx_clear_hw_cntrs(struct wx *wx);
#endif /* _WX_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index e04d4a5eed7b..2823861e5a92 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -488,6 +488,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring,
return false;
rx_ring->rx_buffer_info[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
return true;
}
@@ -721,6 +722,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
/* exit if we failed to retrieve a buffer */
if (!skb) {
+ rx_ring->rx_stats.alloc_rx_buff_failed++;
rx_buffer->pagecnt_bias++;
break;
}
@@ -877,9 +879,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- netif_running(tx_ring->netdev))
+ netif_running(tx_ring->netdev)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ }
}
return !!budget;
@@ -956,6 +960,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
/* A reprieve! - use start_queue because it doesn't call schedule */
netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
return 0;
}
@@ -1533,8 +1538,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
frags[f]));
- if (wx_maybe_stop_tx(tx_ring, count + 3))
+ if (wx_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY;
+ }
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
@@ -2665,8 +2672,11 @@ void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct wx *wx = netdev_priv(netdev);
+ struct wx_hw_stats *hwstats;
int i;
+ wx_update_stats(wx);
+
rcu_read_lock();
for (i = 0; i < wx->num_rx_queues; i++) {
struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]);
@@ -2702,6 +2712,12 @@ void wx_get_stats64(struct net_device *netdev,
}
rcu_read_unlock();
+
+ hwstats = &wx->stats;
+ stats->rx_errors = hwstats->crcerrs + hwstats->rlec;
+ stats->multicast = hwstats->qmprc;
+ stats->rx_length_errors = hwstats->rlec;
+ stats->rx_crc_errors = hwstats->crcerrs;
}
EXPORT_SYMBOL(wx_get_stats64);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index c5cbd177ef62..165e82de772e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -59,6 +59,25 @@
#define WX_TS_ALARM_ST_DALARM BIT(1)
#define WX_TS_ALARM_ST_ALARM BIT(0)
+/* statistic */
+#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C
+#define WX_TX_BC_FRAMES_GOOD_L 0x11824
+#define WX_TX_MC_FRAMES_GOOD_L 0x1182C
+#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900
+#define WX_RX_BC_FRAMES_GOOD_L 0x11918
+#define WX_RX_MC_FRAMES_GOOD_L 0x11920
+#define WX_RX_CRC_ERROR_FRAMES_L 0x11928
+#define WX_RX_LEN_ERROR_FRAMES_L 0x11978
+#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938
+#define WX_RX_OVERSIZE_FRAMES_GOOD 0x1193C
+#define WX_MAC_LXONOFFRXC 0x11E0C
+
+/*********************** Receive DMA registers **************************/
+#define WX_RDM_DRP_PKT 0x12500
+#define WX_RDM_PKT_CNT 0x12504
+#define WX_RDM_BYTE_CNT_LSB 0x12508
+#define WX_RDM_BMC2OS_CNT 0x12510
+
/************************* Port Registers ************************************/
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
@@ -94,6 +113,9 @@
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
#define WX_TDM_RP_IDX 0x1820C
+#define WX_TDM_PKT_CNT 0x18308
+#define WX_TDM_BYTE_CNT_LSB 0x1830C
+#define WX_TDM_OS2BMC_CNT 0x18314
#define WX_TDM_RP_RATE 0x18404
/***************************** RDB registers *********************************/
@@ -106,6 +128,8 @@
/* statistic */
#define WX_RDB_PFCMACDAL 0x19210
#define WX_RDB_PFCMACDAH 0x19214
+#define WX_RDB_LXOFFTXC 0x19218
+#define WX_RDB_LXONTXC 0x1921C
/* ring assignment */
#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
#define WX_RDB_PL_CFG_L4HDR BIT(1)
@@ -218,6 +242,8 @@
#define WX_MNG_MBOX_CTL 0x1E044
#define WX_MNG_MBOX_CTL_SWRDY BIT(0)
#define WX_MNG_MBOX_CTL_FWRDY BIT(2)
+#define WX_MNG_BMC2OS_CNT 0x1E090
+#define WX_MNG_OS2BMC_CNT 0x1E094
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
@@ -251,6 +277,7 @@ enum WX_MSCA_CMD_value {
#define WX_MSCC_SADDR BIT(18)
#define WX_MSCC_BUSY BIT(22)
#define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v)
+#define WX_MDIO_CLAUSE_SELECT 0x11220
#define WX_MMC_CONTROL 0x11800
#define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */
@@ -300,6 +327,7 @@ enum WX_MSCA_CMD_value {
#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40))
#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40))
#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40))
+#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
#define WX_PX_RR_CFG_VLAN BIT(31)
#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
@@ -767,9 +795,16 @@ struct wx_queue_stats {
u64 bytes;
};
+struct wx_tx_queue_stats {
+ u64 restart_queue;
+ u64 tx_busy;
+};
+
struct wx_rx_queue_stats {
+ u64 non_eop_descs;
u64 csum_good_cnt;
u64 csum_err;
+ u64 alloc_rx_buff_failed;
};
/* iterator for handling rings in ring container */
@@ -813,6 +848,7 @@ struct wx_ring {
struct wx_queue_stats stats;
struct u64_stats_sync syncp;
union {
+ struct wx_tx_queue_stats tx_stats;
struct wx_rx_queue_stats rx_stats;
};
} ____cacheline_internodealigned_in_smp;
@@ -844,6 +880,33 @@ enum wx_isb_idx {
WX_ISB_MAX
};
+/* Statistics counters collected by the MAC */
+struct wx_hw_stats {
+ u64 gprc;
+ u64 gptc;
+ u64 gorc;
+ u64 gotc;
+ u64 tpr;
+ u64 tpt;
+ u64 bprc;
+ u64 bptc;
+ u64 mprc;
+ u64 mptc;
+ u64 roc;
+ u64 ruc;
+ u64 lxonoffrxc;
+ u64 lxontxc;
+ u64 lxofftxc;
+ u64 o2bgptc;
+ u64 b2ospc;
+ u64 o2bspc;
+ u64 b2ogprc;
+ u64 rdmdrop;
+ u64 crcerrs;
+ u64 rlec;
+ u64 qmprc;
+};
+
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -919,6 +982,14 @@ struct wx {
u32 wol;
u16 bd_number;
+
+ struct wx_hw_stats stats;
+ u64 tx_busy;
+ u64 non_eop_descs;
+ u64 restart_queue;
+ u64 hw_csum_rx_good;
+ u64 hw_csum_rx_error;
+ u64 alloc_rx_buff_failed;
};
#define WX_INTR_ALL (~0ULL)
@@ -952,6 +1023,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field)
wr32(wx, reg, val);
}
+static inline u64
+rd64(struct wx *wx, u32 reg)
+{
+ u64 lsb, msb;
+
+ lsb = rd32(wx, reg);
+ msb = rd32(wx, reg + 4);
+
+ return (lsb | msb << 32);
+}
+
/* On some domestic CPU platforms, sometimes IO is not synchronized with
* flushing memory, here use readl() to flush PCI read and write.
*/
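
The new rd64() accessor above pairs a low/high register read for the 64-bit MMC counters. Below is a minimal sketch, not the driver's actual wx_update_stats(), of how the counter registers and the wx_hw_stats fields added in this patch might be tied together; which register feeds which field, and the assumption that the counters are clear-on-read, are illustrative only.

static void example_update_stats(struct wx *wx)
{
	struct wx_hw_stats *st = &wx->stats;

	/* Assumes WX_MMC_CONTROL_RSTONRD is set, so each read returns a delta */

	/* 32-bit counters read with the existing rd32() accessor */
	st->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
	st->lxontxc += rd32(wx, WX_RDB_LXONTXC);
	st->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);

	/* 64-bit counters split across an LSB/MSB register pair */
	st->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
	st->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
	st->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
	st->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
}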
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index ec0e869e9aac..afbdf6919071 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -49,6 +49,11 @@ static const struct ethtool_ops ngbe_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_wol = ngbe_get_wol,
.set_wol = ngbe_set_wol,
+ .get_sset_count = wx_get_sset_count,
+ .get_strings = wx_get_strings,
+ .get_ethtool_stats = wx_get_ethtool_stats,
+ .get_eth_mac_stats = wx_get_mac_stats,
+ .get_pause_stats = wx_get_pause_stats,
};
void ngbe_set_ethtool_ops(struct net_device *netdev)
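
ngbe (and txgbe below) now wire up the shared libwx ethtool callbacks, so ethtool -S, ethtool --show-pause and the standard MAC statistics are all served from the same accumulators. The following is only a sketch of what the shared wx_get_mac_stats() callback plausibly does; the function body and the exact field mapping are assumptions, not the libwx implementation.

static void example_get_mac_stats(struct net_device *netdev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	struct wx *wx = netdev_priv(netdev);
	struct wx_hw_stats *st = &wx->stats;

	/* Copy the software accumulators into the standard ethtool layout */
	mac_stats->FramesTransmittedOK = st->gptc;
	mac_stats->FramesReceivedOK = st->gprc;
	mac_stats->FrameCheckSequenceErrors = st->crcerrs;
	mac_stats->BroadcastFramesXmittedOK = st->bptc;
	mac_stats->MulticastFramesXmittedOK = st->mptc;
	mac_stats->BroadcastFramesReceivedOK = st->bprc;
	mac_stats->MulticastFramesReceivedOK = st->mprc;
}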
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
index 6562a2de9527..6459bc1d7c22 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
@@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx)
}
ngbe_reset_misc(wx);
+ wx_clear_hw_cntrs(wx);
+
/* Store the permanent mac address */
wx_get_mac_addr(wx, wx->mac.perm_addr);
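
Both reset paths now call wx_clear_hw_cntrs() so the statistics baseline starts from zero after a reset. What that helper does internally is an assumption here: if WX_MMC_CONTROL_RSTONRD is enabled the MMC counters are clear-on-read, so a single pass over the statistics registers is enough. A rough sketch:

static void example_clear_hw_cntrs(struct wx *wx)
{
	/* Reading a clear-on-read counter discards its stale value */
	rd32(wx, WX_RDM_DRP_PKT);
	rd32(wx, WX_RDB_LXONTXC);
	rd32(wx, WX_RDB_LXOFFTXC);
	rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
	rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
	rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
}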
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index 2b431db6085a..3d43f808c86b 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -332,6 +332,8 @@ static void ngbe_disable_device(struct wx *wx)
wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
}
+
+ wx_update_stats(wx);
}
static void ngbe_down(struct wx *wx)
@@ -677,11 +679,6 @@ static int ngbe_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, wx);
- netif_info(wx, probe, netdev,
- "PHY: %s, PBA No: Wang Xun GbE Family Controller\n",
- wx->mac_type == em_mac_type_mdi ? "Internal" : "External");
- netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
-
return 0;
err_register:
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index 591f5b7b6da6..6302ecca71bb 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re
return 0;
}
-static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
-{
- u32 command, val, device_type = 0;
- struct wx *wx = bus->priv;
- int ret;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(device_type);
- wr32(wx, WX_MSCA, command);
- command = WX_MSCC_CMD(WX_MSCA_CMD_READ) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret) {
- wx_err(wx, "Mdio read c22 command did not complete.\n");
- return ret;
- }
-
- return (u16)rd32(wx, WX_MSCC);
-}
-
-static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
- u32 command, val, device_type = 0;
- struct wx *wx = bus->priv;
- int ret;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(device_type);
- wr32(wx, WX_MSCA, command);
- command = value |
- WX_MSCC_CMD(WX_MSCA_CMD_WRITE) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret)
- wx_err(wx, "Mdio write c22 command did not complete.\n");
-
- return ret;
-}
-
-static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
-{
- struct wx *wx = bus->priv;
- u32 val, command;
- int ret;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
- command = WX_MSCC_CMD(WX_MSCA_CMD_READ) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret) {
- wx_err(wx, "Mdio read c45 command did not complete.\n");
- return ret;
- }
-
- return (u16)rd32(wx, WX_MSCC);
-}
-
-static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
- int devnum, int regnum, u16 value)
-{
- struct wx *wx = bus->priv;
- int ret, command;
- u16 val;
-
- wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
- command = value |
- WX_MSCC_CMD(WX_MSCA_CMD_WRITE) |
- WX_MSCC_BUSY |
- WX_MDIO_CLK(6);
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret)
- wx_err(wx, "Mdio write c45 command did not complete.\n");
-
- return ret;
-}
-
static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum)
{
struct wx *wx = bus->priv;
@@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum)
if (wx->mac_type == em_mac_type_mdi)
phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum);
else
- phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum);
+ phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum);
return phy_data;
}
@@ -162,7 +51,7 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
if (wx->mac_type == em_mac_type_mdi)
ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value);
else
- ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value);
+ ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value);
return ret;
}
@@ -262,8 +151,8 @@ int ngbe_mdio_init(struct wx *wx)
mii_bus->priv = wx;
if (wx->mac_type == em_mac_type_rgmii) {
- mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45;
- mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45;
+ mii_bus->read_c45 = wx_phy_read_reg_mdi_c45;
+ mii_bus->write_c45 = wx_phy_write_reg_mdi_c45;
}
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev));
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 72c8cd2d5575..ff754d69bdf6 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -59,9 +59,6 @@
#define NGBE_EEPROM_VERSION_L 0x1D
#define NGBE_EEPROM_VERSION_H 0x1E
-/* Media-dependent registers. */
-#define NGBE_MDIO_CLAUSE_SELECT 0x11220
-
/* GPIO Registers */
#define NGBE_GPIO_DR 0x14800
#define NGBE_GPIO_DDR 0x14804
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index 859da112586a..3f336a088e43 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -39,6 +39,11 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = txgbe_get_link_ksettings,
.set_link_ksettings = txgbe_set_link_ksettings,
+ .get_sset_count = wx_get_sset_count,
+ .get_strings = wx_get_strings,
+ .get_ethtool_stats = wx_get_ethtool_stats,
+ .get_eth_mac_stats = wx_get_mac_stats,
+ .get_pause_stats = wx_get_pause_stats,
};
void txgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 372745250270..d6b2b3c781b6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -71,114 +71,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx)
}
/**
- * txgbe_read_pba_string - Reads part number string from EEPROM
- * @wx: pointer to hardware structure
- * @pba_num: stores the part number string from the EEPROM
- * @pba_num_size: part number string buffer length
- *
- * Reads the part number string from the EEPROM.
- **/
-int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size)
-{
- u16 pba_ptr, offset, length, data;
- int ret_val;
-
- if (!pba_num) {
- wx_err(wx, "PBA string buffer was null\n");
- return -EINVAL;
- }
-
- ret_val = wx_read_ee_hostif(wx,
- wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
- &data);
- if (ret_val != 0) {
- wx_err(wx, "NVM Read Error\n");
- return ret_val;
- }
-
- ret_val = wx_read_ee_hostif(wx,
- wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
- &pba_ptr);
- if (ret_val != 0) {
- wx_err(wx, "NVM Read Error\n");
- return ret_val;
- }
-
- /* if data is not ptr guard the PBA must be in legacy format which
- * means pba_ptr is actually our second data word for the PBA number
- * and we can decode it into an ascii string
- */
- if (data != TXGBE_PBANUM_PTR_GUARD) {
- wx_err(wx, "NVM PBA number is not stored as string\n");
-
- /* we will need 11 characters to store the PBA */
- if (pba_num_size < 11) {
- wx_err(wx, "PBA string buffer too small\n");
- return -ENOMEM;
- }
-
- /* extract hex string from data and pba_ptr */
- pba_num[0] = (data >> 12) & 0xF;
- pba_num[1] = (data >> 8) & 0xF;
- pba_num[2] = (data >> 4) & 0xF;
- pba_num[3] = data & 0xF;
- pba_num[4] = (pba_ptr >> 12) & 0xF;
- pba_num[5] = (pba_ptr >> 8) & 0xF;
- pba_num[6] = '-';
- pba_num[7] = 0;
- pba_num[8] = (pba_ptr >> 4) & 0xF;
- pba_num[9] = pba_ptr & 0xF;
-
- /* put a null character on the end of our string */
- pba_num[10] = '\0';
-
- /* switch all the data but the '-' to hex char */
- for (offset = 0; offset < 10; offset++) {
- if (pba_num[offset] < 0xA)
- pba_num[offset] += '0';
- else if (pba_num[offset] < 0x10)
- pba_num[offset] += 'A' - 0xA;
- }
-
- return 0;
- }
-
- ret_val = wx_read_ee_hostif(wx, pba_ptr, &length);
- if (ret_val != 0) {
- wx_err(wx, "NVM Read Error\n");
- return ret_val;
- }
-
- if (length == 0xFFFF || length == 0) {
- wx_err(wx, "NVM PBA number section invalid length\n");
- return -EINVAL;
- }
-
- /* check if pba_num buffer is big enough */
- if (pba_num_size < (((u32)length * 2) - 1)) {
- wx_err(wx, "PBA string buffer too small\n");
- return -ENOMEM;
- }
-
- /* trim pba length from start of string */
- pba_ptr++;
- length--;
-
- for (offset = 0; offset < length; offset++) {
- ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data);
- if (ret_val != 0) {
- wx_err(wx, "NVM Read Error\n");
- return ret_val;
- }
- pba_num[offset * 2] = (u8)(data >> 8);
- pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
- }
- pba_num[offset * 2] = '\0';
-
- return 0;
-}
-
-/**
* txgbe_calc_eeprom_checksum - Calculates and returns the checksum
* @wx: pointer to hardware structure
* @checksum: pointer to checksum
@@ -306,6 +198,8 @@ int txgbe_reset_hw(struct wx *wx)
txgbe_reset_misc(wx);
+ wx_clear_hw_cntrs(wx);
+
/* Store the permanent mac address */
wx_get_mac_addr(wx, wx->mac.perm_addr);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index abc729eb187a..1f3ecf60e3c4 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -6,7 +6,6 @@
int txgbe_disable_sec_tx_path(struct wx *wx);
void txgbe_enable_sec_tx_path(struct wx *wx);
-int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size);
int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val);
int txgbe_reset_hw(struct wx *wx);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 5c3aed516ac2..70f0b5c01dac 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -286,6 +286,8 @@ static void txgbe_disable_device(struct wx *wx)
/* Disable the Tx DMA engine */
wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
+
+ wx_update_stats(wx);
}
static void txgbe_down(struct wx *wx)
@@ -538,7 +540,6 @@ static int txgbe_probe(struct pci_dev *pdev,
u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
u16 build = 0, major = 0, patch = 0;
- u8 part_str[TXGBE_PBANUM_LENGTH];
u32 etrack_id = 0;
err = pci_enable_device_mem(pdev);
@@ -736,13 +737,6 @@ static int txgbe_probe(struct pci_dev *pdev,
else
dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");
- /* First try to read PBA as a string */
- err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH);
- if (err)
- strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
-
- netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
-
return 0;
err_remove_phy:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 4159c84035fd..b6c06adb8656 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -647,58 +647,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe)
return 0;
}
-static int txgbe_phy_read(struct mii_bus *bus, int phy_addr,
- int devnum, int regnum)
-{
- struct wx *wx = bus->priv;
- u32 val, command;
- int ret;
-
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
-
- command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret) {
- wx_err(wx, "Mdio read c45 command did not complete.\n");
- return ret;
- }
-
- return (u16)rd32(wx, WX_MSCC);
-}
-
-static int txgbe_phy_write(struct mii_bus *bus, int phy_addr,
- int devnum, int regnum, u16 value)
-{
- struct wx *wx = bus->priv;
- int ret, command;
- u16 val;
-
- /* setup and write the address cycle command */
- command = WX_MSCA_RA(regnum) |
- WX_MSCA_PA(phy_addr) |
- WX_MSCA_DA(devnum);
- wr32(wx, WX_MSCA, command);
-
- command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
- wr32(wx, WX_MSCC, command);
-
- /* wait to complete */
- ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
- 100000, false, wx, WX_MSCC);
- if (ret)
- wx_err(wx, "Mdio write c45 command did not complete.\n");
-
- return ret;
-}
-
static int txgbe_ext_phy_init(struct txgbe *txgbe)
{
struct phy_device *phydev;
@@ -715,8 +663,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe)
return -ENOMEM;
mii_bus->name = "txgbe_mii_bus";
- mii_bus->read_c45 = &txgbe_phy_read;
- mii_bus->write_c45 = &txgbe_phy_write;
+ mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45;
+ mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45;
mii_bus->parent = &pdev->dev;
mii_bus->phy_mask = GENMASK(31, 1);
mii_bus->priv = wx;
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 51199c355f95..3ba9ce43f394 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -88,9 +88,6 @@
#define TXGBE_XPCS_IDA_ADDR 0x13000
#define TXGBE_XPCS_IDA_DATA 0x13004
-/* Part Number String Length */
-#define TXGBE_PBANUM_LENGTH 32
-
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
@@ -98,9 +95,6 @@
#define TXGBE_EEPROM_VERSION_L 0x1D
#define TXGBE_EEPROM_VERSION_H 0x1E
#define TXGBE_ISCSI_BOOT_CONFIG 0x07
-#define TXGBE_PBANUM0_PTR 0x05
-#define TXGBE_PBANUM1_PTR 0x06
-#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
#define TXGBE_MAX_MSIX_VECTORS 64
#define TXGBE_MAX_FDIR_INDICES 63
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 7c52796273a4..990a3cce8c0f 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -14,8 +14,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
#include <linux/of_net.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include "w5100.h"
@@ -420,7 +420,6 @@ MODULE_DEVICE_TABLE(of, w5100_of_match);
static int w5100_spi_probe(struct spi_device *spi)
{
- const struct of_device_id *of_id;
const struct w5100_ops *ops;
kernel_ulong_t driver_data;
const void *mac = NULL;
@@ -432,14 +431,7 @@ static int w5100_spi_probe(struct spi_device *spi)
if (!ret)
mac = tmpmac;
- if (spi->dev.of_node) {
- of_id = of_match_device(w5100_of_match, &spi->dev);
- if (!of_id)
- return -ENODEV;
- driver_data = (kernel_ulong_t)of_id->data;
- } else {
- driver_data = spi_get_device_id(spi)->driver_data;
- }
+ driver_data = (uintptr_t)spi_get_device_match_data(spi);
switch (driver_data) {
case W5100:
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 634946e87e5f..b26fd15c25ae 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -930,8 +930,8 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
if (priv->ops->may_sleep)
queue_work(priv->xfer_wq, &priv->rx_work);
- else if (napi_schedule_prep(&priv->napi))
- __napi_schedule(&priv->napi);
+ else
+ napi_schedule(&priv->napi);
}
return IRQ_HANDLED;
@@ -1062,11 +1062,9 @@ static int w5100_mmio_probe(struct platform_device *pdev)
mac_addr, irq, data ? data->link_gpio : -EINVAL);
}
-static int w5100_mmio_remove(struct platform_device *pdev)
+static void w5100_mmio_remove(struct platform_device *pdev)
{
w5100_remove(&pdev->dev);
-
- return 0;
}
void *w5100_ops_priv(const struct net_device *ndev)
@@ -1273,6 +1271,6 @@ static struct platform_driver w5100_mmio_driver = {
.pm = &w5100_pm_ops,
},
.probe = w5100_mmio_probe,
- .remove = w5100_mmio_remove,
+ .remove_new = w5100_mmio_remove,
};
module_platform_driver(w5100_mmio_driver);
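
The remaining hunks in this series convert several platform drivers from .remove to .remove_new, whose callback returns void instead of int (an int return could only ever be warned about by the driver core). The shape of the conversion, shown on a hypothetical driver whose names are illustrative and not taken from the patch:

/* New-style remove callback: returns void, registered via .remove_new */
static void example_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	/* No return value: any failure here must be logged, not returned */
}

static struct platform_driver example_driver = {
	.driver		= { .name = "example" },
	.remove_new	= example_remove,
};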
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index b0958fe8111e..3318b50a5911 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -627,7 +627,7 @@ err_register:
return err;
}
-static int w5300_remove(struct platform_device *pdev)
+static void w5300_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5300_priv *priv = netdev_priv(ndev);
@@ -639,7 +639,6 @@ static int w5300_remove(struct platform_device *pdev)
unregister_netdev(ndev);
free_netdev(ndev);
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -683,7 +682,7 @@ static struct platform_driver w5300_driver = {
.pm = &w5300_pm_ops,
},
.probe = w5300_probe,
- .remove = w5300_remove,
+ .remove_new = w5300_remove,
};
module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1444b855e7aa..9df39cf8b097 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1626,7 +1626,7 @@ err_sysfs_create:
return rc;
}
-static int temac_remove(struct platform_device *pdev)
+static void temac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct temac_local *lp = netdev_priv(ndev);
@@ -1636,7 +1636,6 @@ static int temac_remove(struct platform_device *pdev)
if (lp->phy_node)
of_node_put(lp->phy_node);
temac_mdio_teardown(lp);
- return 0;
}
static const struct of_device_id temac_of_match[] = {
@@ -1650,7 +1649,7 @@ MODULE_DEVICE_TABLE(of, temac_of_match);
static struct platform_driver temac_driver = {
.probe = temac_probe,
- .remove = temac_remove,
+ .remove_new = temac_remove,
.driver = {
.name = "xilinx_temac",
.of_match_table = temac_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b7ec4dafae90..82d0d44b2b02 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2183,7 +2183,7 @@ free_netdev:
return ret;
}
-static int axienet_remove(struct platform_device *pdev)
+static void axienet_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
@@ -2202,8 +2202,6 @@ static int axienet_remove(struct platform_device *pdev)
clk_disable_unprepare(lp->axi_clk);
free_netdev(ndev);
-
- return 0;
}
static void axienet_shutdown(struct platform_device *pdev)
@@ -2256,7 +2254,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
- .remove = axienet_remove,
+ .remove_new = axienet_remove,
.shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b358ecc67227..765aa516aada 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1180,10 +1180,8 @@ error:
* This function is called if a device is physically removed from the system or
* if the driver module is being unloaded. It frees any resources allocated to
* the device.
- *
- * Return: 0, always.
*/
-static int xemaclite_of_remove(struct platform_device *of_dev)
+static void xemaclite_of_remove(struct platform_device *of_dev)
{
struct net_device *ndev = platform_get_drvdata(of_dev);
@@ -1202,8 +1200,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
lp->phy_node = NULL;
free_netdev(ndev);
-
- return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1262,7 +1258,7 @@ static struct platform_driver xemaclite_of_driver = {
.of_match_table = xemaclite_of_match,
},
.probe = xemaclite_of_probe,
- .remove = xemaclite_of_remove,
+ .remove_new = xemaclite_of_remove,
};
module_platform_driver(xemaclite_of_driver);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3b0c5f177447..531bf919aef5 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -24,6 +24,7 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
@@ -63,7 +64,15 @@
#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE 0x1000
-#define MAX_MRU 1536 /* 0x600 */
+
+/* MRU is said to be 14320 in a code dump, the SW manual says that
+ * MRU/MTU is 16320 and includes VLAN and ethernet headers.
+ * See "IXP400 Software Programmer's Guide" section 10.3.2, page 161.
+ *
+ * FIXME: we have chosen the safe default (14320) but if you can test
+ * jumboframes, experiment with 16320 and see what happens!
+ */
+#define MAX_MRU (14320 - VLAN_ETH_HLEN)
#define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
#define NAPI_WEIGHT 16
@@ -714,9 +723,9 @@ static int eth_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_below_low_watermark(rxq) &&
- napi_reschedule(napi)) { /* not empty again */
+ napi_schedule(napi)) { /* not empty again */
#if DEBUG_RX
- netdev_debug(dev, "eth_poll napi_reschedule succeeded\n");
+ netdev_debug(dev, "eth_poll napi_schedule succeeded\n");
#endif
qmgr_disable_irq(rxq);
continue;
@@ -1182,6 +1191,54 @@ static void destroy_queues(struct port *port)
}
}
+static int ixp4xx_do_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct port *port = netdev_priv(dev);
+ struct npe *npe = port->npe;
+ int framesize, chunks;
+ struct msg msg = {};
+
+ /* adjust for ethernet headers */
+ framesize = new_mtu + VLAN_ETH_HLEN;
+ /* max rx/tx 64 byte chunks */
+ chunks = DIV_ROUND_UP(framesize, 64);
+
+ msg.cmd = NPE_SETMAXFRAMELENGTHS;
+ msg.eth_id = port->id;
+
+ /* Firmware wants to know buffer size in 64 byte chunks */
+ msg.byte2 = chunks << 8;
+ msg.byte3 = chunks << 8;
+
+ msg.byte4 = msg.byte6 = framesize >> 8;
+ msg.byte5 = msg.byte7 = framesize & 0xff;
+
+ if (npe_send_recv_message(npe, &msg, "ETH_SET_MAX_FRAME_LENGTH"))
+ return -EIO;
+ netdev_dbg(dev, "set MTU on NPE %s to %d bytes\n",
+ npe_name(npe), new_mtu);
+
+ return 0;
+}
+
+static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int ret;
+
+ /* MTU can only be changed when the interface is up. We also
+ * set the MTU from dev->mtu when opening the device.
+ */
+ if (dev->flags & IFF_UP) {
+ ret = ixp4xx_do_change_mtu(dev, new_mtu);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
static int eth_open(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
@@ -1232,6 +1289,8 @@ static int eth_open(struct net_device *dev)
if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
return -EIO;
+ ixp4xx_do_change_mtu(dev, dev->mtu);
+
if ((err = request_queues(port)) != 0)
return err;
@@ -1374,6 +1433,7 @@ static int eth_close(struct net_device *dev)
static const struct net_device_ops ixp4xx_netdev_ops = {
.ndo_open = eth_open,
.ndo_stop = eth_close,
+ .ndo_change_mtu = ixp4xx_eth_change_mtu,
.ndo_start_xmit = eth_xmit,
.ndo_set_rx_mode = eth_set_mcast_list,
.ndo_eth_ioctl = eth_ioctl,
@@ -1488,6 +1548,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
ndev->dev.dma_mask = dev->dma_mask;
ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = MAX_MRU;
+
netif_napi_add_weight(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
if (!(port->npe = npe_request(NPE_ID(port->id))))
@@ -1533,7 +1596,7 @@ err_free_mem:
return err;
}
-static int ixp4xx_eth_remove(struct platform_device *pdev)
+static void ixp4xx_eth_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct phy_device *phydev = ndev->phydev;
@@ -1544,7 +1607,6 @@ static int ixp4xx_eth_remove(struct platform_device *pdev)
ixp4xx_mdio_remove();
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
- return 0;
}
static const struct of_device_id ixp4xx_eth_of_match[] = {
@@ -1560,7 +1622,7 @@ static struct platform_driver ixp4xx_eth_driver = {
.of_match_table = of_match_ptr(ixp4xx_eth_of_match),
},
.probe = ixp4xx_eth_probe,
- .remove = ixp4xx_eth_remove,
+ .remove_new = ixp4xx_eth_remove,
};
module_platform_driver(ixp4xx_eth_driver);
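
For reference, the frame-size arithmetic in ixp4xx_do_change_mtu() works out as follows for the default Ethernet MTU; these numbers are just the computation already shown in the code above, not additional behaviour:

	new_mtu   = 1500
	framesize = new_mtu + VLAN_ETH_HLEN = 1500 + 18 = 1518 bytes
	chunks    = DIV_ROUND_UP(1518, 64)  = 24 (64-byte chunks)
	msg.byte2 = msg.byte3 = 24 << 8
	msg.byte4 = msg.byte6 = 1518 >> 8   = 5
	msg.byte5 = msg.byte7 = 1518 & 0xff = 0xee

The upper bound exposed to the stack is ndev->max_mtu = MAX_MRU = 14320 - VLAN_ETH_HLEN = 14302.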